Compare commits

..

49 Commits

Author SHA1 Message Date
00e8e7d116 vault backup: 2025-06-07 19:56:31 2025-06-07 19:56:31 +02:00
cd5e3ec089 vault backup: 2025-06-07 19:46:16 2025-06-07 19:46:16 +02:00
befa559bbb vault backup: 2025-06-07 18:54:39 2025-06-07 18:54:40 +02:00
7b8245d7b5 vault backup: 2025-06-07 18:30:45 2025-06-07 18:30:45 +02:00
d1ef02be6e vault backup: 2025-05-09 18:08:05 2025-05-09 18:08:05 +02:00
a5771a6e1d vault backup: 2025-05-06 13:00:52 2025-05-06 13:00:52 +02:00
d4dd7b142d vault backup: 2025-05-05 20:59:28 2025-05-05 20:59:28 +02:00
1b4bee229f vault backup: 2025-05-05 20:41:16 2025-05-05 20:41:16 +02:00
0dcf0496d7 vault backup: 2025-05-05 19:59:16 2025-05-05 19:59:16 +02:00
bf04a00d10 vault backup: 2025-05-05 16:48:06 2025-05-05 16:48:06 +02:00
464477c358 vault backup: 2025-05-05 16:12:23 2025-05-05 16:12:23 +02:00
a598cca70d vault backup: 2025-05-05 15:05:48 2025-05-05 15:05:48 +02:00
b15c18b00e vault backup: 2025-05-05 13:55:38 2025-05-05 13:55:38 +02:00
9910c7737f vault backup: 2025-05-05 05:01:17 2025-05-05 05:01:17 +02:00
aa6547aa28 vault backup: 2025-05-05 04:53:58 2025-05-05 04:53:58 +02:00
df6a389528 vault backup: 2025-05-05 04:21:09 2025-05-05 04:21:09 +02:00
660fc61bcf vault backup: 2025-05-02 18:58:09 2025-05-02 18:58:09 +02:00
0553b398fc vault backup: 2025-05-02 18:57:49 2025-05-02 18:57:49 +02:00
73a62f4edd vault backup: 2025-05-02 18:51:24 2025-05-02 18:51:24 +02:00
40a3d4e103 vault backup: 2025-04-25 16:16:13 2025-04-25 16:16:13 +02:00
3685aee1bd vault backup: 2025-04-23 15:41:23 2025-04-23 15:41:23 +02:00
a4a5486252 vault backup: 2025-04-21 22:49:53 2025-04-21 22:49:53 +02:00
e87d3b26a1 vault backup: 2025-04-21 22:13:39 2025-04-21 22:13:39 +02:00
ba1366cb79 vault backup: 2025-04-21 21:20:06 2025-04-21 21:20:06 +02:00
29f7c07e4c os 2025-04-19 13:36:03 +02:00
ffeccca723 vault backup: 2025-04-19 13:35:29 2025-04-19 13:35:29 +02:00
dcecfda4fc vault backup: 2025-04-14 20:58:47 2025-04-14 20:58:47 +02:00
4caa0e4c24 vault backup: 2025-04-14 19:58:43 2025-04-14 19:58:43 +02:00
6d08e555c2 vault backup: 2025-04-14 19:38:20 2025-04-14 19:38:20 +02:00
3c6cafc8ff vault backup: 2025-04-14 18:57:30 2025-04-14 18:57:30 +02:00
d7a17f5c9b vault backup: 2025-03-17 20:11:53 2025-03-17 20:11:53 +01:00
8bf5195059 vault backup: 2025-02-27 16:29:47 2025-02-27 16:29:47 +01:00
f5bf5e4f3e vault backup: 2025-02-20 16:21:47 2025-02-20 16:21:47 +01:00
88c988e284 vault backup: 2025-02-20 16:00:23 2025-02-20 16:00:23 +01:00
7ed288dd9c vault backup: 2025-02-20 15:48:21 2025-02-20 15:48:21 +01:00
680fb334ac vault backup: 2025-02-20 15:08:09 2025-02-20 15:08:09 +01:00
d62b2f8867 vault backup: 2025-02-20 14:57:00 2025-02-20 14:57:00 +01:00
7332fea8bf vault backup: 2025-02-14 15:29:55 2025-02-14 15:29:55 +01:00
fb35dd1020 vault backup: 2025-02-13 16:10:19 2025-02-13 16:10:19 +01:00
20b172d455 vault backup: 2025-02-12 15:04:15 2025-02-12 15:04:15 +01:00
672268d6e7 vault backup: 2025-02-10 16:39:29 2025-02-10 16:39:29 +01:00
22c70ee735 vault backup: 2025-02-09 13:48:28 2025-02-09 13:48:28 +01:00
275a3cbaaf vault backup: 2025-02-04 14:31:08 2025-02-04 14:31:09 +01:00
d42935aa65 vault backup: 2025-02-04 13:03:51 2025-02-04 13:03:51 +01:00
14b3afb020 vault backup: 2025-02-04 12:36:44 2025-02-04 12:36:44 +01:00
03342ea07a vault backup: 2025-02-04 12:03:53 2025-02-04 12:03:53 +01:00
f728914cdb vault backup: 2025-02-04 11:07:38 2025-02-04 11:07:38 +01:00
e768b3c766 Finished lecture 1 2025-02-04 10:46:13 +01:00
d0b7c1fd65 vault backup: 2025-02-04 10:18:25 2025-02-04 10:18:25 +01:00
91 changed files with 36203 additions and 2483 deletions

3
.obsidian/app.json vendored
View File

@ -8,5 +8,6 @@
},
"alwaysUpdateLinks": true,
"useMarkdownLinks": true,
"promptDelete": false
"promptDelete": false,
"readableLineLength": false
}

View File

@ -3,6 +3,7 @@
"accentColor": "#efb9fd",
"theme": "obsidian",
"enabledCssSnippets": [
"dark_pdf"
"dark_pdf",
"images"
]
}

10
.obsidian/bookmarks.json vendored Normal file
View File

@ -0,0 +1,10 @@
{
"items": [
{
"type": "file",
"ctime": 1738664322117,
"path": "Extracurricular/Misc/Ideas.md",
"title": "Ideas"
}
]
}

View File

@ -5,7 +5,6 @@
"obsidian-icon-folder",
"obsidian-advanced-slides",
"obsidian-annotator",
"obsidian-markmind",
"obsidian-wakatime",
"omnisearch",
"obsidian-enhancing-export",
@ -15,5 +14,6 @@
"emoji-shortcodes",
"advanced-canvas",
"obsidian-tracker",
"better-export-pdf"
"better-export-pdf",
"obsidian-mind-map"
]

View File

@ -26,5 +26,6 @@
"workspaces": false,
"file-recovery": true,
"publish": false,
"sync": false
"sync": false,
"webviewer": false
}

16
.obsidian/graph.json vendored
View File

@ -5,7 +5,7 @@
"showAttachments": false,
"hideUnresolved": false,
"showOrphans": false,
"collapse-color-groups": true,
"collapse-color-groups": false,
"colorGroups": [
{
"query": "[\"type\":theoretical]",
@ -38,14 +38,14 @@
],
"collapse-display": false,
"showArrow": false,
"textFadeMultiplier": 0.2,
"nodeSizeMultiplier": 1.22291666666667,
"lineSizeMultiplier": 1.60572916666667,
"collapse-forces": true,
"centerStrength": 0.518713248970312,
"repelStrength": 10,
"textFadeMultiplier": -0.4,
"nodeSizeMultiplier": 1.60572916666667,
"lineSizeMultiplier": 2.80520833333333,
"collapse-forces": false,
"centerStrength": 0.442708333333333,
"repelStrength": 17.9166666666667,
"linkStrength": 1,
"linkDistance": 250,
"scale": 0.9999999999999991,
"scale": 0.44649054406822863,
"close": true
}

View File

@ -0,0 +1,21 @@
{
"port": "3000",
"autoReload": true,
"exportDirectory": "/export",
"enableChalkboard": false,
"enableOverview": false,
"enableMenu": false,
"enablePointer": false,
"enableTimeBar": false,
"theme": "black",
"highlightTheme": "zenburn",
"transition": "slide",
"transitionSpeed": "default",
"controls": true,
"progress": true,
"slideNumber": false,
"showGrid": false,
"autoComplete": "inPreview",
"paneMode": "split",
"motm": "2025-04-21T19:43:27.198Z"
}

View File

@ -6,11 +6,11 @@
"emojiStyle": "native",
"iconColor": null,
"recentlyUsedIcons": [
"LiInfo",
"🤖",
"🖥",
"LiComputer",
"LiSatellite",
"LiBookOpenCheck",
"📖"
"LiSatellite"
],
"recentlyUsedIconsSize": 5,
"rules": [],
@ -89,5 +89,7 @@
"iconName": "LiComputer",
"iconColor": "#3df2ff"
},
"Operating Systems": "🖥"
"Operating Systems": "🖥",
"Introduction to Machine Learning": "🤖",
"Introduction to Machine Learning/Introductory lecture.md": "LiInfo"
}

View File

@ -1,18 +0,0 @@
{
"canvasSize": 8000,
"headLevel": 2,
"fontSize": 16,
"background": "transparent",
"layout": "mindmap",
"layoutDirect": "mindmap",
"protocol": "jump-to-pdf",
"registerPdfEvent": true,
"viewerTheme": 2,
"mindmapmode": "basic",
"parseMindMap": true,
"annotateTop": 0,
"annotateBottom": 0,
"useCustomShortcut": false,
"highlightFormat": "\nPage:{{page}}\n<span style=\"color:rgb({{color}})\">■</span>:{{highlightText}}\nComment:{{comment}}\n[📌]({{link}})\n^{{id}}\n",
"uid": "00p01100q00t01300o01300r01300t01200o013"
}

File diff suppressed because one or more lines are too long

View File

@ -1,10 +0,0 @@
{
"id": "obsidian-markmind",
"name": "Markmind",
"version": "3.0.2",
"minAppVersion": "0.9.12",
"description": "This is a mindmap , outline tool for obsidian.",
"author": "Mark",
"authorUrl": "https://github.com/MarkMindCkm/obsidian-markmind",
"isDesktopOnly": false
}

File diff suppressed because one or more lines are too long

32631
.obsidian/plugins/obsidian-mind-map/main.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,8 @@
{
"id": "obsidian-mind-map",
"name": "Mind Map",
"version": "1.1.0",
"description": "A plugin to preview notes as Markmap mind maps",
"isDesktopOnly": false,
"js": "main.js"
}

9
.obsidian/snippets/images.css vendored Normal file
View File

@ -0,0 +1,9 @@
img {
display: block;
margin-left: auto;
margin-right: auto;
max-width:500px;
}
div.mermaid {
text-align: center;
}

View File

@ -22,6 +22,8 @@
"excalidraw-css": "text",
"excalidraw-autoexport": "text",
"excalidraw-embeddable-theme": "text",
"excalidraw-open-md": "checkbox"
"excalidraw-open-md": "checkbox",
"backlinks": "multitext",
"mermaid": "multitext"
}
}

View File

@ -4,53 +4,42 @@
"type": "split",
"children": [
{
"id": "91b1494cd808f86c",
"id": "31174fbd9a68be49",
"type": "tabs",
"children": [
{
"id": "cd8e3009ae40c051",
"id": "f9fe04cad473d20c",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "Extracurricular/satQuest/Meeting Dec 18.md",
"file": "Operating Systems/Mass Storage.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "Meeting Dec 18"
"title": "Mass Storage"
}
},
{
"id": "3dbe2cc922c4bee2",
"id": "5e5f715713d29cb8",
"type": "leaf",
"state": {
"type": "markdown",
"type": "canvas",
"state": {
"file": "Software Engineering/Introductory Lecture.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "Introductory Lecture"
"file": "Extracurricular/Plag/Untitled.canvas",
"viewState": {
"x": -354.95061823931576,
"y": 2689.4480955935114,
"zoom": 0.7529071380352278
}
},
{
"id": "1ac4561fed3e4c16",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "conflict-files-obsidian-git.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "conflict-files-obsidian-git"
"icon": "lucide-layout-dashboard",
"title": "Untitled"
}
}
],
"currentTab": 2
"currentTab": 1
}
],
"direction": "vertical"
@ -69,7 +58,8 @@
"state": {
"type": "file-explorer",
"state": {
"sortOrder": "alphabetical"
"sortOrder": "alphabetical",
"autoReveal": false
},
"icon": "lucide-folder-closed",
"title": "Files"
@ -209,8 +199,7 @@
}
],
"direction": "horizontal",
"width": 200,
"collapsed": true
"width": 200
},
"left-ribbon": {
"hiddenItems": {
@ -227,61 +216,61 @@
"omnisearch:Omnisearch": false
}
},
"active": "1ac4561fed3e4c16",
"active": "5e5f715713d29cb8",
"lastOpenFiles": [
"Software Engineering/Introductory Lecture.md",
"conflict-files-obsidian-git.md",
"Untitled.md",
"Statistics and Probability/Mock exam run 1.md",
"Functional Programming/Proofs.md",
"Operating Systems",
"Statistics and Probability/Support Lecture.md",
"Software Engineering",
"Pasted image 20250113151159.png",
"Extracurricular/Misc/Ideas.md",
"Functional Programming/Drawing 2024-12-24 17.52.22.excalidraw.md",
"Extracurricular/satQuest/Meeting Dec 18.md",
"Extracurricular/satQuest/img/Pasted image 20241218122110.png",
"Excalidraw/Drawing 2024-12-11 23.27.51.excalidraw.md",
"Discrete Structures/Midterm/attempt 2.md",
"Discrete Structures/Recurrence relations.md",
"Discrete Structures/Midterm/attempt 1.md",
"Discrete Structures/Counting.md",
"Discrete Structures/Midterm/Untitled.md",
"Discrete Structures/Midterm/Midterm prep.md",
"Discrete Structures/Midterm",
"Extracurricular/satQuest/img/Pasted image 20241206134156.png",
"Extracurricular/satQuest/Parts Proposal.md",
"Operating Systems/Mass Storage.md",
"Extracurricular/Plag/Untitled.canvas",
"Extracurricular/Plag/AI Checker.md",
"Extracurricular/Plag",
"Extracurricular/AI Checker.md",
"Operating Systems/Memory Management.md",
"Operating Systems/File Systems Management.md",
"Operating Systems/Virtual Memory.md",
"Operating Systems/Virtualization.md",
"Operating Systems/assets/Pasted image 20250505205120.png",
"Operating Systems/assets/Pasted image 20250505203114.png",
"Operating Systems/assets/Pasted image 20250505202532.png",
"Operating Systems/assets/Pasted image 20250505201950.png",
"Operating Systems/assets/Pasted image 20250505201859.png",
"Operating Systems/Input Output.md",
"Operating Systems/Inter-Process Communication.md",
"README.md",
"Operating Systems/assets/Pasted image 20250505195901.png",
"Operating Systems/assets/Pasted image 20250505201102.png",
"Operating Systems/assets/Pasted image 20250505200548.png",
"Operating Systems/assets/Pasted image 20250505200028.png",
"Operating Systems/assets/Pasted image 20250505194426.png",
"Operating Systems/Processes and Threads.md",
"Operating Systems/Scheduling.md",
"Operating Systems/Overview.md",
"Untitled.canvas",
"Discrete Structures/Midterm/attempt 2.md",
"Discrete Structures/Mathematical Data Structures.md",
"Discrete Structures/Relations and Digraphs.md",
"Discrete Structures/Mathematical Proofs (Induction).md",
"Advanced Algorithms/Graphs.md",
"Advanced Algorithms/P vs. NP.md",
"Advanced Algorithms/Pasted image 20241203234600.png",
"Excalidraw",
"Extracurricular/satQuest/Initial Meeting.md",
"Extracurricular/satQuest/img/Pasted image 20241206134213.png",
"Extracurricular/satQuest/img/Pasted image 20241206134207.png",
"Extracurricular/satQuest/img/Pasted image 20241206133007.png",
"Extracurricular/satQuest/img",
"Advanced Algorithms/Recurrence relations.md",
"Advanced Algorithms/Graph Algorithms.md",
"unicef.org.md",
"Linear Algebra/Matrices.md",
"Advanced Algorithms/assets/pnp/Pasted image 20241203234013.png",
"Advanced Algorithms/assets/pnp/Pasted image 20241203234032.png",
"Advanced Algorithms/assets/graph/1_WR4AtjT_nhwSOtAW99Yd5g.gif",
"Advanced Algorithms/assets/pnp",
"Advanced Algorithms/assets/graph",
"Advanced Algorithms/practicals",
"Statistics and Probability/R",
"Discrete Structures/img",
"Languages & Machines/Regular languages.md",
"Extracurricular/Circuitree/Shitter Zero/Timeline.md",
"Extracurricular/Circuitree/Shitter Zero",
"Discrete Structures/Mathematical Proofs (Induction).md",
"Discrete Structures/Recurrence relations.md",
"Discrete Structures/Relations and Digraphs.md",
"Discrete Structures/Counting.md",
"Software Engineering/Meeting.md",
"Software Engineering/Reqirements.md",
"Languages & Machines/assets",
"Languages & Machines",
"Extracurricular/Misc/Proposed Routine Plan.canvas",
"Extracurricular/Circuitree/Antenna Building/Untitled",
"Extracurricular/Circuitree/Antenna Building",
"Software Engineering",
"Operating Systems/assets",
"Operating Systems",
"Introduction to Machine Learning/assets",
"Web Engineering/canvae/server_client.canvas",
"Advanced Programming/projects/second/Refactoring.canvas",
"Advanced Programming/assets/assignment/assignment_organization.canvas",
"Advanced Programming/assets/spring/Beans.canvas",
"Advanced Programming/assets/assignment/assignment_app.canvas",
"Extracurricular/Misc/Proposed Routine Plan.canvas",
"Extracurricular/Circuitree/Committee Market/discussion/Proposed showcase infra.canvas"
]
}

View File

@ -0,0 +1,18 @@
```mermaid
sequenceDiagram
Prototype ->> Order: Meeting with everyone
Order ->> Prepare: Not everyone has to be present
```
## Next week
- Early, need to organize a meeting to build the antenna
- Purchase the materials right after
- Schedule the event
## All times
- Create slides

View File

@ -0,0 +1,94 @@
---
theme: beige
highlightTheme: css/vs2015.css
---
<div style="width:100%; transform:scale(1.1)">
```mermaid
sequenceDiagram
Prototyping ->> Order: We need to come up with a working prototype by next week
Order ->> Code: ITM - decide the scope (more or less)
Order ->> Design: Case? No case?
Design ->> Presentation: Slides
```
</div>
---
## Heads up!
we got two fucking weeks
---
## Features
| Part | Function |
| ------------------------------------------ | -------------- |
| NFC/RFID transceiver | Copying cards |
| 433 MHz transceiver | Replay attacks |
| HID (RP2040 and **ESP32-C3** support this) | Rubber ducky |
==NOTE THAT ESP32 BASIC DOESN'T==
---
### Anything else come to mind?<sup>*</sup>
<sub>* Keep in mind that we have 2 fucking weeks</sub>
---
### RP2040
- We can reuse the macro pad schematic!
- Seems like a little bit of an advanced project for it, might be slow
### ESP32
- We can do network stuff!
- Doesn't seem much more difficult than the RP2040.
---
## Design
- Screen (we got 50)
- MCU
- Antenna (used by both the NFC and 433 MHz transceivers)
- The transceivers
Terminated by a male USB-A
### Should we include a case?
This question is mostly for Mihai as it involves him designing it.
---
<div style="width:100%;">
<img src="shitter.png" />
</div>
---
## Code
Let's discuss a potential scope for the project!
- Do we **only** provide bindings for the sensors?
- Will the attendees be building anything? If yes, **what**?
- C? Micropython?
---
## Let's distribute!
If we want to achieve *anything* given the short timeframe[^1], we're gonna need to copy the GAPC model.
**GitHub issues!**
<small> 1 (CircuitRee trademarked statement) </small>
---
# We got this!

Binary file not shown.

After

Width:  |  Height:  |  Size: 246 KiB

View File

@ -1,7 +0,0 @@
## Prelims
No fancy shit
## BAPC

View File

@ -1,3 +0,0 @@
### Tasks

View File

@ -1,2 +1,5 @@
1. Anti-AI extension -> Data -> Bachelor's thesis
2. Temmies calendar integration (background ical service)
2. Writing (come on dude, start already)
3. Temmies calendar integration (background ical service)
4. o.pm bangle.js reboot: Backend in Rust, frontend grafana?
5. Markdown parser in Haskell

View File

@ -1,38 +1,283 @@
{
"nodes":[
{"id":"163ca037f71a0b6f","type":"group","x":-320,"y":115,"width":838,"height":865,"color":"4","label":"Health"},
{"id":"07a053d6dd1d68a0","type":"group","x":640,"y":334,"width":596,"height":427,"color":"6","label":"Productivity"},
{"id":"5159e3993f95efcb","type":"text","text":"Avoid bad habits (3)","x":-138,"y":135,"width":183,"height":60},
{"id":"3b1083b1e372d498","type":"text","text":"Reading (4, 7)","x":42,"y":417,"width":170,"height":60},
{"id":"f897f4ba690283db","type":"text","text":"Sleep Schedule Improvement (1)","x":-189,"y":600,"width":295,"height":50},
{"id":"3aa369dab5e52be1","type":"text","text":"Better Social (9)","x":-215,"y":900,"width":186,"height":60},
{"id":"8dbfb266e9ae4898","type":"text","text":"Physical health routine (2) becomes manageable ","x":-7,"y":900,"width":263,"height":60},
{"id":"1a3aafd43063deaf","type":"text","text":"Schedule better","x":-129,"y":740,"width":176,"height":60},
{"id":"c53e69212bf4751a","type":"text","text":"Eating better (8)","x":287,"y":800,"width":181,"height":60},
{"id":"2db72cbff0c7ee4d","type":"text","text":"Better communication and reliability","x":101,"y":711,"width":222,"height":60},
{"id":"a07ffd84eb97e076","type":"text","text":"Good habits","x":106,"y":264,"width":150,"height":60},
{"id":"6dc127fe4098cce8","type":"text","text":"Enjoy hobbies during day (5)","x":256,"y":417,"width":242,"height":60},
{"id":"0bc6527818df845c","type":"text","text":"Sticking to schedule (6)","x":660,"y":500,"width":250,"height":60},
{"id":"2b76b4366e7d7447","type":"text","text":"Consistency in terms of Uni","x":713,"y":354,"width":280,"height":60},
{"id":"7b69dd32a9ff908b","type":"text","text":"Using the same [[Plan#Proven studying tactics | Study tactics]]","x":933,"y":500,"width":283,"height":60},
{"id":"37abaca3fa89fb9b","type":"text","text":"Better mental and productivity","x":799,"y":681,"width":294,"height":60}
"nodes": [
{
"id": "163ca037f71a0b6f",
"type": "group",
"styleAttributes": {},
"x": -320,
"y": 115,
"width": 838,
"height": 865,
"color": "4",
"label": "Health"
},
{
"id": "07a053d6dd1d68a0",
"type": "group",
"styleAttributes": {},
"x": 640,
"y": 334,
"width": 596,
"height": 427,
"color": "6",
"label": "Productivity"
},
{
"id": "37abaca3fa89fb9b",
"type": "text",
"text": "Better mental and productivity",
"styleAttributes": {},
"x": 799,
"y": 681,
"width": 294,
"height": 60
},
{
"id": "6dc127fe4098cce8",
"type": "text",
"text": "Enjoy hobbies during day (5)",
"styleAttributes": {},
"x": 256,
"y": 417,
"width": 242,
"height": 60
},
{
"id": "0bc6527818df845c",
"type": "text",
"text": "Sticking to schedule (6)",
"styleAttributes": {},
"x": 660,
"y": 500,
"width": 250,
"height": 60
},
{
"id": "5159e3993f95efcb",
"type": "text",
"text": "Avoid bad habits (3)",
"styleAttributes": {},
"x": -138,
"y": 135,
"width": 183,
"height": 60
},
{
"id": "3b1083b1e372d498",
"type": "text",
"text": "Reading (4, 7)",
"styleAttributes": {},
"x": 42,
"y": 417,
"width": 170,
"height": 60
},
{
"id": "f897f4ba690283db",
"type": "text",
"text": "Sleep Schedule Improvement (1)",
"styleAttributes": {},
"x": -189,
"y": 600,
"width": 295,
"height": 50
},
{
"id": "2db72cbff0c7ee4d",
"type": "text",
"text": "Better communication and reliability",
"styleAttributes": {},
"x": 101,
"y": 711,
"width": 222,
"height": 60
},
{
"id": "a07ffd84eb97e076",
"type": "text",
"text": "Good habits",
"styleAttributes": {},
"x": 106,
"y": 264,
"width": 150,
"height": 60
},
{
"id": "7b69dd32a9ff908b",
"type": "text",
"text": "Using the same [[Plan#Proven studying tactics | Study tactics]]",
"styleAttributes": {},
"x": 933,
"y": 500,
"width": 283,
"height": 60
},
{
"id": "3aa369dab5e52be1",
"type": "text",
"text": "Better Social (9)",
"styleAttributes": {},
"x": -215,
"y": 900,
"width": 186,
"height": 60
},
{
"id": "8dbfb266e9ae4898",
"type": "text",
"text": "Physical health routine (2) becomes manageable ",
"styleAttributes": {},
"x": -7,
"y": 900,
"width": 263,
"height": 60
},
{
"id": "1a3aafd43063deaf",
"type": "text",
"text": "Schedule better",
"styleAttributes": {},
"x": -129,
"y": 740,
"width": 176,
"height": 60
},
{
"id": "c53e69212bf4751a",
"type": "text",
"text": "Eating better (8)",
"styleAttributes": {},
"x": 287,
"y": 800,
"width": 181,
"height": 60
},
{
"id": "2b76b4366e7d7447",
"type": "text",
"text": "Consistency in terms of Uni",
"styleAttributes": {},
"x": 713,
"y": 354,
"width": 280,
"height": 60
}
],
"edges":[
{"id":"e7314215770b1b5a","fromNode":"5159e3993f95efcb","fromSide":"bottom","toNode":"a07ffd84eb97e076","toSide":"top","label":"Replace with"},
{"id":"ea778f6b684828c5","fromNode":"a07ffd84eb97e076","fromSide":"bottom","toNode":"3b1083b1e372d498","toSide":"top"},
{"id":"3d552b06f8e838d2","fromNode":"a07ffd84eb97e076","fromSide":"bottom","toNode":"6dc127fe4098cce8","toSide":"top"},
{"id":"ab96f7d33eb891ce","fromNode":"3b1083b1e372d498","fromSide":"bottom","toNode":"f897f4ba690283db","toSide":"right"},
{"id":"c0ad3200c4953eec","fromNode":"6dc127fe4098cce8","fromSide":"bottom","toNode":"f897f4ba690283db","toSide":"right"},
{"id":"e5b241cb68304667","fromNode":"f897f4ba690283db","fromSide":"bottom","toNode":"1a3aafd43063deaf","toSide":"top"},
{"id":"b9c17ff043c9c20c","fromNode":"1a3aafd43063deaf","fromSide":"bottom","toNode":"3aa369dab5e52be1","toSide":"top"},
{"id":"bb5bfaec52770979","fromNode":"1a3aafd43063deaf","fromSide":"bottom","toNode":"8dbfb266e9ae4898","toSide":"top"},
{"id":"abbd6f42e11801dd","fromNode":"f897f4ba690283db","fromSide":"left","toNode":"5159e3993f95efcb","toSide":"left","label":"Feeds into"},
{"id":"a54109b27e452e8c","fromNode":"c53e69212bf4751a","fromSide":"bottom","toNode":"8dbfb266e9ae4898","toSide":"right"},
{"id":"14046f7578cefaf8","fromNode":"2b76b4366e7d7447","fromSide":"bottom","toNode":"0bc6527818df845c","toSide":"top"},
{"id":"f3acc398f229e47b","fromNode":"2b76b4366e7d7447","fromSide":"bottom","toNode":"7b69dd32a9ff908b","toSide":"top"},
{"id":"cf8880dfe4e23cf4","fromNode":"0bc6527818df845c","fromSide":"bottom","toNode":"37abaca3fa89fb9b","toSide":"top"},
{"id":"74f1bc2742e8f761","fromNode":"7b69dd32a9ff908b","fromSide":"bottom","toNode":"37abaca3fa89fb9b","toSide":"top"},
{"id":"73a16c85641dc4ff","fromNode":"163ca037f71a0b6f","fromSide":"right","toNode":"07a053d6dd1d68a0","toSide":"left"},
{"id":"a5d14c192499b2f9","fromNode":"1a3aafd43063deaf","fromSide":"right","toNode":"2db72cbff0c7ee4d","toSide":"left"}
]
"edges": [
{
"id": "e7314215770b1b5a",
"fromNode": "5159e3993f95efcb",
"fromSide": "bottom",
"toNode": "a07ffd84eb97e076",
"toSide": "top",
"label": "Replace with"
},
{
"id": "ea778f6b684828c5",
"fromNode": "a07ffd84eb97e076",
"fromSide": "bottom",
"toNode": "3b1083b1e372d498",
"toSide": "top"
},
{
"id": "3d552b06f8e838d2",
"fromNode": "a07ffd84eb97e076",
"fromSide": "bottom",
"toNode": "6dc127fe4098cce8",
"toSide": "top"
},
{
"id": "ab96f7d33eb891ce",
"fromNode": "3b1083b1e372d498",
"fromSide": "bottom",
"toNode": "f897f4ba690283db",
"toSide": "right"
},
{
"id": "c0ad3200c4953eec",
"fromNode": "6dc127fe4098cce8",
"fromSide": "bottom",
"toNode": "f897f4ba690283db",
"toSide": "right"
},
{
"id": "e5b241cb68304667",
"fromNode": "f897f4ba690283db",
"fromSide": "bottom",
"toNode": "1a3aafd43063deaf",
"toSide": "top"
},
{
"id": "b9c17ff043c9c20c",
"fromNode": "1a3aafd43063deaf",
"fromSide": "bottom",
"toNode": "3aa369dab5e52be1",
"toSide": "top"
},
{
"id": "bb5bfaec52770979",
"fromNode": "1a3aafd43063deaf",
"fromSide": "bottom",
"toNode": "8dbfb266e9ae4898",
"toSide": "top"
},
{
"id": "abbd6f42e11801dd",
"fromNode": "f897f4ba690283db",
"fromSide": "left",
"toNode": "5159e3993f95efcb",
"toSide": "left",
"label": "Feeds into"
},
{
"id": "a54109b27e452e8c",
"fromNode": "c53e69212bf4751a",
"fromSide": "bottom",
"toNode": "8dbfb266e9ae4898",
"toSide": "right"
},
{
"id": "14046f7578cefaf8",
"fromNode": "2b76b4366e7d7447",
"fromSide": "bottom",
"toNode": "0bc6527818df845c",
"toSide": "top"
},
{
"id": "f3acc398f229e47b",
"fromNode": "2b76b4366e7d7447",
"fromSide": "bottom",
"toNode": "7b69dd32a9ff908b",
"toSide": "top"
},
{
"id": "cf8880dfe4e23cf4",
"fromNode": "0bc6527818df845c",
"fromSide": "bottom",
"toNode": "37abaca3fa89fb9b",
"toSide": "top"
},
{
"id": "74f1bc2742e8f761",
"fromNode": "7b69dd32a9ff908b",
"fromSide": "bottom",
"toNode": "37abaca3fa89fb9b",
"toSide": "top"
},
{
"id": "73a16c85641dc4ff",
"fromNode": "163ca037f71a0b6f",
"fromSide": "right",
"toNode": "07a053d6dd1d68a0",
"toSide": "left"
},
{
"id": "a5d14c192499b2f9",
"fromNode": "1a3aafd43063deaf",
"fromSide": "right",
"toNode": "2db72cbff0c7ee4d",
"toSide": "left"
}
],
"metadata": {}
}

File diff suppressed because it is too large Load Diff

View File

@ -2,6 +2,7 @@
title: Understanding Induction
abstract: I am fucking stupid and I can't understand this
author:
type: theoretical
---
## Understanding Induction

View File

@ -0,0 +1,220 @@
---
type: theoretical
---
## We're gonna be doing
- General intro
- Unsupervised learning
- Supervised learning
### Philosophical Introduction
- What is intelligence?
- Can machines **ever** be intelligent?
### Intelligent systems
a system that can:
- Perceive
Interaction with the environment. e.g. computer vision, speech recognition
- Make decisions
process incoming information, analyze it, and make decisions based on it.
e.g. self-driving cars, game playing
- Learn
improve performance over time, i.e. data driven adaptation based on observations *only* (for unsupervised learning) or based on observations and feedback (for supervised learning)
## Relevant Mathematical Notation
Models are noted as $m = \gamma (D)$, where $D$ is the data and $\gamma$ is the learning procedure that produces the model $m$.
Example - a model that predicts the price of a house based on its size and location:
$m = \gamma ( \beta_0 + \beta_1 x_1 + \beta_2 x_2)$
where $x_1$ is the size of the house and $x_2$ is the location of the house.
## Unsupervised learning
- Compression
Represent all the data in a more compact form (few features)
- Clustering
Identify groups of similar data points
- Reduction
Reduce the dimensionality of the data, i.e. represent large amount of data by few prototypes[^1]
The above aims define a **cost function** or optimization strategy, which is used to teach the machine to learn, but there is no feedback from the environment (hence **un**supervised learning).
Example:
Consider a dataset of images of cats and dogs. We can use unsupervised learning to identify the features that are common to all cats and all dogs. This can be used to classify new images of cats and dogs.
## Supervised learning
*Classification/Regression*
Data: observations, e.g. images, text, etc. and labels, e.g. cat/dog, spam/not spam, etc.
Regression problems:
- Predict quantitative values, e.g. house prices, stock prices, etc.
e.g. predict the weight of a cow based on its size:
$m = \gamma ( \beta_0 + \beta_1 x_1)$
where $x_1$ is the size of the cow.
Classification problems:
- Predict qualitative values, e.g. cat/dog, spam/not spam, etc.
- Binary classification: two classes
- Multi-class classification: more than two classes
> [!IMPORTANT]
> It is crucial to find the right features to represent the data. The model is only as good as the features used to represent the data.
### Some issues
- Complexity of the model
- Parametrization[^2] of a hypothesis
- Noise in the dataset
## Other forms of learning
- Semi-supervised learning, self-supervised learning
Partially labeled data, e.g. some images are labeled, some are not. Extend by making predictions on the unlabeled data and using the predictions to improve the model.
- Reinforcement learning
Delayed reward (feedback) from the environment. e.g. game playing, robotics, etc.
- Transfer learning, few-shot learning, single-shot learning
Use knowledge from one task to improve performance on another task. e.g. use knowledge from a large dataset to improve performance on a smaller dataset.
## Deeper look at reinforcement learning
There's a reward signal evaluating the outcome of past actions.
Problems involving an agent[^3], an environment, and a reward signal.
The goal is to learn a policy that maximizes the reward signal.
```mermaid
graph TD
A[Agent] --> B[Environment]
B --> C[Reward signal]
C --> A
```
### Mathematical Formulation
[Markov Decision Process](https://en.wikipedia.org/wiki/Markov_decision_process)[^5] (MDP) is a mathematical framework for modeling decision-making in situations where outcomes are partly random and partly under the control of a decision maker.
An MDP consists of:
- A set of states $S$
- A set of actions $A$
- A reward function $R$
- A transition function $P$
- A discount factor $\gamma$
It can be represented as a tuple $(S, A, R, P, \gamma)$.
Or a graph:
```mermaid
graph TD
A[States] --> B[Actions]
B --> C[Reward function]
C --> D[Transition function]
D --> E[Discount factor]
```
The process itself can be represented as a sequence of states, actions, and rewards:
$(s_0, a_0, r_0, s_1, a_1, r_1, s_2, a_2, r_2, \ldots)$
The goal is to learn a policy $\pi$ that maps states to actions, i.e. $\pi(s) = a$.
The policy can be deterministic or stochastic[^4].
1. At time step $t=0$, the agent observes the current state $s_0$.
2. For $t=0$ until end:
- The agent selects an action $a_t$ based on the policy $\pi$.
- Environment grants reward $r_t$ and transitions to the next state $s_{t+1}$.
- Agent updates its policy based on the reward and the next state.
To summarize:
$$
G_t = \sum_{k\geq 0}\gamma^k r_{t+k} = r_t + \gamma r_{t+1} + \gamma^2 r_{t+2} + \ldots
$$
where $G_t$ is the return at time step $t$, $r_t$ is the reward at time step $t$, and $\gamma$ is the discount factor.
## The value function
The value function $V(s)$ is the expected return starting from state $s$ and following policy $\pi$.
$$
V_\pi(s) = \mathbb{E}_\pi(G_t | s_t = s)
$$
Similarly, the action-value function $Q(s, a)$ is the expected return starting from state $s$, taking action $a$, and following policy $\pi$.
$$
Q_\pi(s, a) = \mathbb{E}_\pi(G_t | s_t = s, a_t = a)
$$
### Bellman equation
Like Richard Bellman from the [Graph Algorithms](Graph%20Algorithms.md).
States that the value of a state is the reward for that state plus the value of the next state.
$$
V_\pi(s) = \mathbb{E}_\pi(r_{t+1} + \gamma V_\pi(s_{t+1}) | s_t = s)
$$
## Q-learning
Something makes me feel like this will be in the exam.
The goal of Q-learning is to find the optimal policy by learning the optimal Q-values for each state-action pair.
What's a Q-value? It's the expected return starting from state $s$, taking action $a$, and following policy $\pi$.
$$
Q^*(s, a) = \max_\pi Q_\pi(s, a)
$$
The optimal Q-value $Q^*(s, a)$ is the maximum Q-value for state $s$ and action $a$. The algorithm iteratively updates the Q-values based on the Bellman equation. This is called **value iteration**.
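A minimal tabular sketch of that update rule, $Q(s,a) \leftarrow Q(s,a) + \alpha\,(r + \gamma \max_{a'} Q(s',a') - Q(s,a))$; the state/action counts, the learning rate and the toy transition are assumptions for illustration, not from the lecture:
```c
#include <stdio.h>

#define N_STATES  4
#define N_ACTIONS 2

/* Q-table, zero-initialized */
static double Q[N_STATES][N_ACTIONS];

static double max_q(int s) {
    double best = Q[s][0];
    for (int a = 1; a < N_ACTIONS; a++)
        if (Q[s][a] > best) best = Q[s][a];
    return best;
}

/* One update after observing (s, a, r, s') */
static void q_update(int s, int a, double r, int s_next,
                     double alpha, double gamma) {
    double target = r + gamma * max_q(s_next);   /* Bellman target */
    Q[s][a] += alpha * (target - Q[s][a]);
}

int main(void) {
    /* toy transition: action 1 in state 0 yields reward 1 and lands in state 3 */
    q_update(0, 1, 1.0, 3, 0.1, 0.9);
    printf("Q[0][1] = %f\n", Q[0][1]);
    return 0;
}
```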
## Conclusion
As with every other fucking course that deals with graphs in any way shape or form, we have to deal with A FUCK TON of hard-to-read notation <3.
![Comparison](assets/image.png)
[^1]: Prototypes in this context means a representative sample of the data. For example, if we have a dataset of images of cats and dogs, we can represent the dataset by a few images of cats and dogs that are representative of the whole dataset.
[^2]: Parametrization is the process of defining a model in terms of its parameters. For example, in the model $m = \gamma ( \beta_0 + \beta_1 x_1)$, $\beta_0$ and $\beta_1$ are the parameters of the model.
[^3]: An agent is an entity that interacts with the environment. For example, a self-driving car is an agent that interacts with the environment (the road, other cars, etc.) to achieve a goal (e.g. reach a destination).
[^4]: A deterministic policy maps each state to a single action, while a stochastic policy maps each state to a probability distribution over actions. For example, a deterministic policy might map state $s$ to action $a$, while a stochastic policy might map state $s$ to a probability distribution over actions.
[^5]: https://en.wikipedia.org/wiki/Markov_chain

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

View File

@ -0,0 +1,55 @@
---
type: theoretical
---
## The foundations of computation
Looking for answers for basic questions like:
- Computability?
- Power $\leftrightarrow$ Programming constructs?
Which leads us to fundamental concepts like:
- State
- Transitions
- Non-determinism
- Undecidability
## Models
### Finite memory
Finite automata, regexp
![](Pasted%20image%2020250414190100.png)
### Finite memory with stack
Push down automata
![](Pasted%20image%2020250414190119.png)
### Unrestricted
Turing machines
![](Pasted%20image%2020250414190144.png)
## Grammars
![](Pasted%20image%2020250414190229.png)
On a higher level, it seems like grammars and machines are very different, but parsing a language (a set of strings) is quite similar to computation.
## State-based systems and glossary
An FSM can be a specification for OOP.
- States ($q_0,\ldots, q_n$)
- Transitions ($a,b,c,\ldots,z$)
- We can interpret the transitions as class methods and specify the sequences of allowed invocations - **typestate**
## Notation
- $x \in X, X\subseteq Y$
- $\forall x \in X: P(x), \exists x \in X: P(x)$
- $R \subseteq X \times Y$ is a relation between $X$ and $Y$
- $xRy \equiv (x,y) \in R$
- $G = (V, E)$, where $E \subseteq V\times V$ is a directed graph
Part of [Relations and Digraphs](Relations%20and%20Digraphs.md)

View File

@ -0,0 +1,61 @@
---
type: math
---
## Induction
Similar (if not the same) to:
- [Mathematical Proofs (Induction)](Mathematical%20Proofs%20(Induction).md)
- [Structural Proofs](Proofs.md)
- Base case $0\in \mathbb{N}$
- Inductive step - if $n\in \mathbb{N} \implies n+1\in \mathbb{N}$
- We allow a finite number of steps
I.e.
Given $f (n) = n(n + 1)$ for all $n\in \mathbb{N}$, then $f (n)$ is even.
**Base case:** $f(0) = 0\times 1 = 0$, which is even
**I.S.:**
$$
f(n+1) = (n+1)(n+2)= n(n+1)+2(n+1) = f(n) + 2(n+1) \blacksquare
$$
## Strings and Languages
Literally the same as [Mathematical Data Structures](Mathematical%20Data%20Structures.md), but on strings
How to define the reversal of a string, inductively?
Let $w$ be a finite string. We define $w^R$ by induction on $|w|$:
**B.C.:**
$|w| = 0$, then, trivially, $w = \epsilon \therefore w^R = \epsilon$
**I.S.:**
$|w| = n \geq 1$, so $w = ua$ with $|u| = n-1$ and $a$ a single symbol; then $w^R = a\,u^R$.
## Operations on strings
- Concatenation (associative)
- Substring, prefix, suffix
- Replication (exponentiation): a string concatenated with itself
- Reversal ($u^R$)
## Operations on languages
- Lifting operations on strings to languages
- Concatenation of languages
- Kleene star - $V^*$ -> smallest superset[^1] of V that contains the empty string and is closed under concatenation, i.e. zero or more repetitions
- Reversal
## Regular sets / languages
This used to be in DS, but I don't have it in this repo.
Recursively defined over an alphabet $\Sigma$ from
- $\emptyset$
- $\{\epsilon\}$
- $\{a\} | \forall a \in \Sigma$
Regex is a notation to denote regular languages, e.g. $a(a|b)^*$ denotes all strings over $\{a,b\}$ that start with $a$.
[^1]: The opposite of a subset - a set which contains all elements of another (and possibly more)

View File

Before

Width:  |  Height:  |  Size: 7.5 KiB

After

Width:  |  Height:  |  Size: 7.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 225 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 95 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 894 KiB

View File

@ -0,0 +1,341 @@
---
type: theoretical
backlinks:
- "[[Memory Management]]"
---
A file system consists of two parts
- Collection of files
- A directory structure -> provides information about all files in the system
## File
- Logical view -> the unit of storing data
Files are mapped by the OS onto physical nonvolatile devices
**Types:**
- Data
- Numeric
- Character
- Binary
- Program
**Attributes**:
- Name
- Identifier (unique number)
- Type[^2]
- Location -> pointer
- Size
- Protection (permissions)
- Datetime and user id
All of these are stored in **i-nodes**.
### INodes
- Size in bytes
- Access permissions
- Type
- Creation and last access datetime
- Owner ID
- Group ID
- Hard link count
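On Linux most of these fields can be inspected through the POSIX `stat()` call; a small sketch (the path is illustrative):
```c
#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>
#include <time.h>

int main(void) {
    struct stat st;
    if (stat("example.txt", &st) != 0) {   /* illustrative path */
        perror("stat");
        return 1;
    }
    printf("inode:       %ju\n", (uintmax_t)st.st_ino);
    printf("size:        %jd bytes\n", (intmax_t)st.st_size);
    printf("permissions: %o\n", (unsigned)(st.st_mode & 0777));
    printf("owner/group: %u/%u\n", (unsigned)st.st_uid, (unsigned)st.st_gid);
    printf("hard links:  %ju\n", (uintmax_t)st.st_nlink);
    printf("last access: %s", ctime(&st.st_atime));
    return 0;
}
```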
### Logical Definition
- Named collection of related information
- Files may have free form (text files) or can be rigidly formatted[^1]
### Operations
- Create
- Write
- Read
- Seek (reposition within file)
- Delete
- Truncate - shorten or cut off by removing data from the end
- Open (load to memory)
- Close (unload)
### Open files
Tracked by an **open-file table**, counted by **file-open count**.
In order to [avoid race conditions](Inter-Process%20Communication.md#Avoiding%20race%20conditions), we need to lock the files somehow.
- **Shared lock** -> several processes can acquire concurrently, used for reads
- **Exclusive lock** -> writer lock
- Mandatory vs. advisory -> access is denied depending on locks held and requested vs. processes can find status of locks and decide what to do
### Structure
Could be many:
- None
- Simple record
- Lines
- Fixed length
- Variable length
- Complex
- Formatted document
- Relocatable load file [^3]
## Directories
Collection of nodes containing information about all files. Also resides on disk.
**Operations**:
- Search for a file
- Create a file
- Delete a file
- List a directory
- Rename a file
- Traverse file system
### Single level directory
A single directory for all users.
Clearly, we need unique names, which can become a problem real fast. That shit is gonna grow super big.
### Two-level directory
Users have different directories. In Linux -> `/home/user` is separate, allowing for the same file names. Linux, however, uses a multi-level:
### Tree-Structured Directories
- Efficient searching
- Grouping
- Absolute v. relative path
### Acyclic-Graph
Have shared subdirectories and files. Symlinks achieve this.
### Structure
In Linux, it is a table (a file) which stores:
- File name
- Inode
## Symlinks
**Hard** vs **Soft**. A hard link is another directory entry pointing to the same inode (no file data is copied), while a soft (symbolic) link is just a pointer to a path.
>[!IMPORTANT]
>We only allow links to files to avoid cycles. Every time a new link is added, we also run a cycle-detection algorithm to determine whether it is OK.
## Disk
Can be subdivided into **partitions**.
Disk/partition can be used **raw** (no file system) or can be **formatted**. The entity containing the file system is known as a volume.
> [!NOTE]- Typical fs organization
> ![](Pasted%20image%2020250505144352.png)
### Layout
![](Pasted%20image%2020250505155546.png)
- **Boot block**
- Contains initial bootstrap program to load the OS
- Typically the first sector reads another program from the next few sectors
- **Super block** - state of the file system
- Type -> ext3,ext4,FAT, etc.
- Size -> Number of blocks
- Block size
- Block group information -> number of block groups in file system
- Free block count
- Free inode count
- Inode size
- FS mount info
- Journal info
### Free space management
Unix uses a bitmap to show free disk blocks. Zero=free, one=in use
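A tiny sketch of how such a block bitmap could be queried and updated (purely illustrative, not the actual ext implementation):
```c
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* One bit per disk block: 0 = free, 1 = in use (matching the convention above). */
static bool block_in_use(const uint8_t *bitmap, size_t block) {
    return (bitmap[block / 8] >> (block % 8)) & 1;
}

static void mark_block(uint8_t *bitmap, size_t block, bool in_use) {
    if (in_use)
        bitmap[block / 8] |=  (uint8_t)(1u << (block % 8));
    else
        bitmap[block / 8] &= (uint8_t)~(1u << (block % 8));
}

int main(void) {
    uint8_t bitmap[16] = {0};        /* 128 blocks, all free */
    mark_block(bitmap, 42, true);
    return block_in_use(bitmap, 42) ? 0 : 1;
}
```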
## Access lists and groups
Read, write and execute.
Three classes of users on Linux
1. Owner -> 7 (Read Write Execute)
2. Group -> 6 (RW)
3. Public -> 1 (X)
## Blocks
The IDs of data blocks are stored in [INodes](File%20Systems%20Management.md#INodes), the IDs of the first 12 blocks are stored in direct reference fields.
![](Pasted%20image%2020250505154746.png)
### Allocation
- Contiguous -> Stored in a single block
- Linked Allocation -> blocks contain a pointer to the next one (slower access)
- Indexed -> Each file has an index block that stores pointers to all its data blocks
### Groups
Subdivision of the entire disk or partition
Has:
- A block bitmap
- An inode bitmap
- An inode table holding the actual inodes
> [!INFO]
> Default block group size in ext4 is 128MB
## Journaling
Ensure the integrity of the file system by keeping track of changes before they are actually applied to the main file system
Phases:
- Write-ahead logging -> before any changes are made to the file system
- Commit -> shit actually happens
- Crash recovery -> we can replay the journal to apply any uncommitted changes
Types:
- Write-Ahead Logging (WAL) -> logs changes before they are applied to the file system
- Metadata journaling -> only metadata is logged. Metadata is restored to a consistent state if crash.
- Full journaling -> both
## Example: EXT4
- Journaling
- Larger file and volume sizes
- Extents -> range of contiguous blocks, reduces fragmentation
- Multiblock allocator -> multiple blocks at once
- `fsck`, optimized file system check
- Pre-allocation
- Checksums -> ensure integrity
## Example: Windows FS
### FAT(32)
File allocation table.
No hard links :C. Directory contains:
- File name -> can be up to 8 characters and extension up to 3
- Attributes (one byte)
![](Pasted%20image%2020250505160518.png)
- File size -> four byte field for filesize in bytes. Max. 4GB
- ID of first block (4 byte)
Obviously this is trash since it cannot be used with disks of very large capacity. Windows therefore introduced clustering: grouping 4, 8 or 16 blocks together.
The table itself is an array of cluster entries in which the block chains of files are stored. Each entry is 4 bytes. The list of empty blocks is also stored.
![](Pasted%20image%2020250505161031.png)
Note the reserved blocks. They contain:
- Boot sector (VBR)
- Bios parameter block
- Bootloader code
- Sector, cluster size, FAT count, root directory location
- FS information Sector (only for FAT32)
- Last allocated cluster for speed
- Backup boot sector
- In case of corruption
#### Free blocks list
Stores a value for each cluster which can indicate:
- `0x00000000` -> Free cluster
- Next cluster number -> Cluster is allocated and points to the next one
- `0xFFFFFFF8` - `0xFFFFFFFF` -> EOF
- `0xFFFFFFF7` -> bad cluster
To find a free block we just need to search for the first available cluster. We keep the last allocated cluster, optimizing search time.
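A minimal sketch of following a cluster chain through an in-memory FAT, using the marker values from the list above (simplified: real FAT32 masks the top 4 bits of each entry, and the toy table is made up):
```c
#include <stdint.h>
#include <stdio.h>

#define FAT_FREE 0x00000000u
#define FAT_BAD  0xFFFFFFF7u
#define FAT_EOF  0xFFFFFFF8u   /* entries >= this mark end of chain */

/* Print the cluster chain of a file, starting from its first cluster. */
static void walk_chain(const uint32_t *fat, uint32_t first_cluster) {
    uint32_t c = first_cluster;
    for (;;) {
        printf("cluster %u\n", (unsigned)c);
        uint32_t next = fat[c];
        if (next >= FAT_EOF) break;                      /* end of file */
        if (next == FAT_BAD || next == FAT_FREE) break;  /* corrupt or broken chain */
        c = next;
    }
}

int main(void) {
    /* toy FAT: a file occupies clusters 2 -> 3 -> 5, then EOF */
    uint32_t fat[8] = {0, 0, 3, 5, 0, 0xFFFFFFFFu, 0, 0};
    walk_chain(fat, 2);
    return 0;
}
```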
### NTFS
New Technologies File System.
- Everything is a cluster
- Size is a multiple of disk block size
- Journaling
- File data compression
![](Pasted%20image%2020250505161542.png)
- Boot sector (VBR)
- NTFS signature and other boot info
- Location of Master File Table (MFT)
- Sector 0 of partition
- MFT
- Stores metadata for every file and directory
- MFT entry that stores attributes
- name
- size
- timestamps
- security
- MFT itself is described in the MFT lmfao
- File system metadata
- $MFT, $Bitmap , $LogFile, $Secure, etc. store metadata
- System files are treated like regular files
- Data
- Actual file content, either stored in MFT for small entries or in separate clusters (large files)
- Uses extents[^4] and B+ trees[^5]
- Supports encryption
#### MFT entry
Each file or directory is represented by a 1KB entry:
- File name
- Info (timestamps, perms)
- Data location (resident[^6] or not)
- Index
- Attributes
![](Pasted%20image%2020250505162331.png)
##### `$DATA`
- Mft Entry
- If the file contains regular data, the `$DATA` attribute stores the file content or the location
- For files that fit in a single MFT record (1KB usually)
- In-place storage of data (resident)
- For larger files, the `$DATA` attribute contains data runs, which are pointers that tell NTFS where the file's data is located on the disk. Typically a sequence of three values
- offset/ length byte
- Cluster count
- Cluster offset
##### Bitmaps
- Map of logical clusters in use and not. Same as FAT.
##### Compression
Compresses data in 16-cluster chunks.
Size of a compression unit (chunk) depends on cluster size:
- 4 KB cluster size -> 64 KB compression unit (most common on modern volumes)
- 8 KB cluster size -> 128 KB compression unit
If a chunk is not compressible to at least 50%, NTFS stores it uncompressed.
Uses LZNT1, a variation of LZ77.
##### Journaling
Logs all file system changes in the `$LOGFILE` before applying them.
- It can detect bad sectors and mark them in `$BadClus`
- NTFS can recover a corrupted MFT using `$MFTMirr`
- NTFS uses ACLs to manage permissions
- Each file stores a `$SECURITY_DESCRIPTOR`
### Security descriptors
```
Owner: S-1-5-21-3623811015-3361044348-30300820-1001 (User: Alice)
Group: S-1-5-32-544 (Administrators)
DACL:
Allow: S-1-5-21-3623811015-3361044348-30300820-1001 (Alice) - Full Control
Deny: S-1-5-21-3623811015-3361044348-30300820-1002 (Bob) - Read Access
Allow: S-1-5-18 (Local System) - Full Control
SACL:
Audit: S-1-5-21-3623811015-3361044348-30300820-1003 (Eve) - Log Failed
Access
```
Where DACL = **Discretionary Access Control List** and SACL = **System Access Control List**
---
[^1]: **Columnar**, fixed-format ASCII Files have fixed field lengths, as opposed to **delimited**, i.e. fields can be as large as we want them to
[^2]: Extension (.pdf, .txt) as opposed to format, which specifies the [grammar](Regular%20languages.md) of the file
[^3]: contains information about where to place different parts of the program in memory.
[^4]: contiguous area of storage reserved for a file in a file system, represented as a range of block numbers, or tracks on count key data devices
[^5]: Height-balanced tree. Nodes can contain multiple keys and pointers. Leaf nodes hold the data records, upper nodes only store keys. Ordered like a BST.
[^6]: In the MFT entry straight up.

View File

@ -0,0 +1,125 @@
---
type: theoretical
---
I/O devices can be divided into two categories:
- Block devices -> store information in fixed-size blocks, each one with its own address
- Character devices -> deliver or accepts a stream of characters (no regard to any structure)
- Obviously, it is not addressable
They have components:
- Mechanical -> the thing (sensor blah blah)
- Electronic -> the controller
- Converts serial bit stream to block of bytes
- Perform error detection
- Make available to main memory
## Memory-Mapped I/O
Controllers have a few registers that are used for communication with the CPU. By writing to these registers, the OS can command the device to perform an action; by reading them, it can tell the device's state.
> [!IMPORTANT]
> In memory-mapped I/O each control register is assigned a unique memory address to which no memory is assigned.
In most systems, the assigned addresses are at or near the top of the address space.
![](Pasted%20image%2020250505192629.png)
### Advantages
- No special I/O instructions are needed to read and write the control registers
- Simple device driver
- With memory-mapped I/O, no special protection mechanism is needed to keep user processes from performing I/O.
- If each device has its control registers on a different page of the address space, the operating system can give a user control over specific devices but not others by simply including the desired pages in its page table.
### Disadvantages
- Caching the page of a device control register would be a problem
- The DCRs are cached => references would just take value from cache (stale data)
- Infinite loop is possible, since the software would never know whether the device is ready
- Single bus -> everyone looks at every address
- One address space, then all memory modules *and* all I/O devices must examine all memory references to see which ones to respond to
![](Pasted%20image%2020250505193602.png)
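To make the register model concrete, a bare-metal style sketch of accessing hypothetical memory-mapped device registers (the base address, offsets and bit layout are invented for illustration; `volatile` is what prevents the stale-cache problem mentioned above):
```c
#include <stdint.h>

/* Hypothetical device registers; real addresses come from the hardware manual. */
#define DEV_BASE    0xFFFF0000u
#define DEV_STATUS  (*(volatile uint32_t *)(DEV_BASE + 0x0))
#define DEV_DATA    (*(volatile uint32_t *)(DEV_BASE + 0x4))
#define DEV_CTRL    (*(volatile uint32_t *)(DEV_BASE + 0x8))

#define STATUS_READY 0x1u
#define CTRL_START   0x1u

/* Issue a command by writing the control register, then poll the status register.
 * 'volatile' keeps the compiler from reusing a cached value of DEV_STATUS. */
static void dev_write_word(uint32_t word) {
    DEV_DATA = word;
    DEV_CTRL = CTRL_START;
    while ((DEV_STATUS & STATUS_READY) == 0)
        ;   /* busy-wait until the device reports completion */
}
```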
## DMA
Requesting one byte at a time from an I/O controller wastes the CPU's time. Instead, DMA (Direct Memory Access) is often used.
The DMA controller has access to the system bus **independently** of the CPU.
It includes registers that can be written and read by the CPU:
- Memory address register
- Byte count register
- One or more control registers to specify the I/O port to use, direction of transfer, transfer unit, number of bytes in one burst
![](Pasted%20image%2020250505194426.png)
### Accessing BUS
- Word-at-a-time -> the controller requests the transfer of one word and gets the bus; the CPU waits (cycle stealing)
- Block mode -> the controller acquires the bus, issues a series of transfers and then releases it (burst mode)
### Accessing memory
The DMA controller can have the device controller transfer the data directly to main memory (fly-by mode).
An alternative is to have the device controller send the word to the DMA controller, which then issues a bus request to write the word to memory.
### Device Internal Buffer
- Read the data into its internal buffer first
- Verify the checksum (error checking) before starting a transfer
- The bits keep arriving at a constant rate, whether the controller is ready for them or not
- DMA transfer to memory is not time critical
> [!IMPORTANT] No DMA
> Not all computers use DMA, CPU is often far faster than the DMA controller and can do the job muuuch faster.
## Interrupts
1. I/O device requests a service, it causes an interrupt, asserting a signal on a bus line assigned to it
2. Signal is detected by the interrupt controller chip
3. If no other interrupts are pending, interrupt controller handles interrupt
- Otherwise, priorities are used, with the device keeping its interrupt signal asserted until it is serviced
4. Controller puts a number on the address lines specifying which device wants attention
5. Interrupt causes CPU to stop what it is doing
6. The number on the address lines is used as an index into a table called the interrupt vector
![](Pasted%20image%2020250505195901.png)
![](Pasted%20image%2020250505200028.png)
### Handling interrupts
- The microprogram or hardware checks whether an interrupt is pending.
- Instruction cycle:
- Fetch
- Decode
- Read operands
- Execute
- Store
- Check for interrupts
This can be pipelined
### Precise interrupts
1. PC is saved in a known place
2. All instructions before the one pointed to by the PC have completed
3. No instruction beyond the current one has finished
4. Execution state is known
An interrupt that doesn't meet the above requirements is called **imprecise**
![](Pasted%20image%2020250505200548.png)
## Clocks
![](Pasted%20image%2020250505201102.png)
> [!IMPORTANT]
> Watchdog timers **ARE clocks**
## Mass Storage
Secondary storage for modern computers and shit.

View File

@ -0,0 +1,143 @@
---
type: theoretical
backlinks:
- "[[Overview#Multiprogramming]]"
---
## Intro
Processes frequently need to communicate with other
processes.
### Independent
Cannot affect or be affected by other processes.
### Dependent
The opposite
### Why?
- Information sharing
- Computation speedup
- Modularity
- Convenience
## Methods
### Shared memory
```mermaid
block-beta
columns 1
a["Process A"] Shared b["Process B"] ... Kernel
```
Processes/threads exchange information by writing in shared memory variables. This is where [Concurrency](Inter-Process%20Communication.md#Concurrency) becomes an issue.
#### Synchronization
To remedy the race condition issue, we should synchronize shared memory access. In other words, **when a process writes to shared memory, others mustn't be able to**. Multiple read access is allowed though.
### Message passing (queueing)
```mermaid
block-beta
columns 1
a["Process A"] b["Process B"] ... ... Kernel
```
Processes communicate with each other by exchanging messages. A process may send information to a port, from which another process may receive information.
We need to at least be able to `send()` and `receive()`.
## Concurrency
Accessing the same shared memory at the same time can cause issues. This occurrence is called a **Race Condition**.
> [!IMPORTANT]- When does it occur?
> A race condition occurs when some processes or threads can access (read or write) a shared data variable concurrently and at least one of the accesses is a write access (data manipulation).
The result depends on when context switching[^1] happens.
The part of the program where shared memory is accessed is called the **Critical Section (CS)**.
### Critical Regions/Sections
![|600](Pasted%20image%2020250502174340.png)
### Avoiding race conditions
> [!IMPORTANT]- Conditions
>1. No two processes may be simultaneously inside their critical regions. (Mutual Exclusion)
>2. No assumptions may be made about speeds or the number of CPUs.
>3. No process running outside its critical region may block other processes.
>4. No process should have to wait forever to enter its critical region. (Starvation)
### Locks
A flag which tells us whether the shared memory is currently being written into.
### Mutexes
Mutual exclusion - a type of **lock**, specifically to enforce point 1 in [Avoiding race conditions](Inter-Process%20Communication.md#Avoiding%20race%20conditions) (i.e. enforcing mutual exclusion).
### Semaphores
Can allow more than one thread to access a resource, based on a number of permits. A binary semaphore is essentially just a lock; a counting semaphore allows only as many concurrent accesses as there are permits.
Use a `wait()`
```c
void wait(int *S){
while((*S)<=0); // busy waiting
(*S)--;
}
```
and a `signal()`:
```c
void signal(int *S){
(*S)++;
}
```
Just keep a fucking list of processes currently semaphoring.
#### Producer-Consumer Problem
- The producer produces items and places them in the buffer.
- The consumer removes items from the buffer for processing
We need to ensure that while a producer is placing an item in the buffer, the consumer does not consume an item at the same time. In this problem, the buffer is the **critical section**.
> [!example]- How do we solve this?
> To solve this problem, we need two counting semaphores, Full and Empty. “Full” keeps track of the number of items in the buffer at any given time and “Empty” keeps track of the number of unoccupied slots.
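A compact sketch of that solution using POSIX counting semaphores plus a mutex around the buffer (buffer size, item type and iteration counts are arbitrary choices):
```c
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define BUF_SIZE 8

static int buffer[BUF_SIZE];
static int in = 0, out = 0;

static sem_t empty_slots;          /* counts free slots, starts at BUF_SIZE */
static sem_t full_slots;           /* counts filled slots, starts at 0 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *producer(void *arg) {
    (void)arg;
    for (int item = 0; item < 32; item++) {
        sem_wait(&empty_slots);            /* wait for a free slot */
        pthread_mutex_lock(&lock);         /* buffer is the critical section */
        buffer[in] = item;
        in = (in + 1) % BUF_SIZE;
        pthread_mutex_unlock(&lock);
        sem_post(&full_slots);             /* signal: one more item available */
    }
    return NULL;
}

static void *consumer(void *arg) {
    (void)arg;
    for (int i = 0; i < 32; i++) {
        sem_wait(&full_slots);             /* wait for an item */
        pthread_mutex_lock(&lock);
        int item = buffer[out];
        out = (out + 1) % BUF_SIZE;
        pthread_mutex_unlock(&lock);
        sem_post(&empty_slots);            /* signal: one more free slot */
        printf("consumed %d\n", item);
    }
    return NULL;
}

int main(void) {
    sem_init(&empty_slots, 0, BUF_SIZE);
    sem_init(&full_slots, 0, 0);
    pthread_t p, c;
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}
```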
#### Readers-Writers Problem
* Multiple readers can access the shared data simultaneously without causing any issues because they are only reading and not modifying the data.
* Only one writer can access the shared data at a time to ensure data integrity
We already mentioned this in [Synchronization](Inter-Process%20Communication.md#Synchronization).
> [!example]- How do we solve this?
> Two solutions: we either give priority to readers or to writers, but we have to do so consistently. That is, we let reads (or writes) proceed first until none are waiting.
### Peterson's Algorithm
Where `i` and `j` are separate processes.
1. Two Boolean flags: one for each process (e.g.,`flag[0]` and `flag[1]`). Each flag indicates whether the corresponding process wants to enter the critical section.
2.
```c
flag[i] = true;
turn = j;
while (flag[j] && turn == j) {
// busy wait[^2]
}
```
3. Once the while loop condition fails, process i enters its critical section.
4. After finishing its critical section, process i sets flag[i] to false.
5. Repeat!
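Putting these steps together, a minimal two-thread sketch; C11 atomics (sequentially consistent by default) are used so the `flag` and `turn` accesses are not reordered, and the names and shared counter are illustrative:
```c
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int flag[2];   /* flag[i] = process i wants to enter */
static atomic_int turn;      /* whose turn it is to wait */
static long counter;         /* shared data protected by the lock */

static void lock(int i) {
    int j = 1 - i;
    atomic_store(&flag[i], 1);     /* announce intent */
    atomic_store(&turn, j);        /* give the other side priority */
    while (atomic_load(&flag[j]) && atomic_load(&turn) == j)
        ;                          /* busy wait */
}

static void unlock(int i) {
    atomic_store(&flag[i], 0);     /* leave critical section */
}

static void *worker(void *arg) {
    int i = *(int *)arg;
    for (int k = 0; k < 100000; k++) {
        lock(i);
        counter++;                 /* critical section */
        unlock(i);
    }
    return NULL;
}

int main(void) {
    int ids[2] = {0, 1};
    pthread_t t[2];
    for (int i = 0; i < 2; i++) pthread_create(&t[i], NULL, worker, &ids[i]);
    for (int i = 0; i < 2; i++) pthread_join(t[i], NULL);
    printf("counter = %ld (expected 200000)\n", counter);
    return 0;
}
```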
## Monitors
A high-level abstraction that provides a convenient and effective mechanism for process synchronization.
It defines procedures (i.e. methods).
> [!WARNING]
> Only one process may be executing any of the monitor's procedures within the monitor at a time
It uses **condition variables** (often with wait and signal[^3]operations) to allow threads to wait for certain conditions to be met before proceeding.
![|600](Pasted%20image%2020250502180811.png)
## Endianness
![](Pasted%20image%2020250505163335.png)
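A quick way to check a machine's byte order (illustrative):
```c
#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint32_t x = 0x01020304;
    uint8_t first = *(uint8_t *)&x;   /* byte stored at the lowest address */
    printf("%s-endian\n", first == 0x04 ? "little" : "big");
    return 0;
}
```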
---
[^1]: [Context switching](Processes%20and%20Threads.md#Context%20switching)
[^2]: Process repeatedly checks a condition (usually in a loop) until a desired event or state is reached. Not too different from polling.
[^3]: to the other threads

View File

@ -0,0 +1,38 @@
## Hard disks (HDDs)
Spin platters of magnetically-coated material under moving RW heads
- Rotate at 60 to 250 times/s
- Transfer rate is the rate at which data flows between the drive and the computer
- Positioning time (random access time) is the time to move the disk arm to the desired cylinder (seek time) plus the time for the desired sector to rotate under the disk head (rotational latency)
![](Pasted%20image%2020250505201859.png)
### Controller
Receives data/ command from the OS, controls I/O operation and sends back results to OS
![](Pasted%20image%2020250505201950.png)
### I/O steps
- Move head to track -> Seek Time (S)
- Find sector -> Rotational Latency Time (r)
- Transfer data to/from controller -> Block Transfer Time (btt)
- Error checking and report to OS
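As a worked example with assumed numbers (average seek 4 ms, 7200 RPM spindle, 1 MB block at a 200 MB/s transfer rate), the total service time is roughly:
$$
T \approx S + r + btt = 4\,\text{ms} + \tfrac{1}{2}\cdot\tfrac{60\,000\,\text{ms}}{7200} + \tfrac{1\,\text{MB}}{200\,\text{MB/s}} \approx 4 + 4.17 + 5 \approx 13\,\text{ms}
$$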
### Disk Formatting
Each platter must receive a low-level format done by software, i.e. marking the cylinders and tracks, each containing a few sectors, with short gaps in between
Each sector includes:
- Preamble: starts with start of the sector bit pattern, the cylinder and sector numbers
- Data: size of the data portion is determined by the formatting program -> 512 bytes usually
- ECC: error correction, typically 16 bytes
Numbering the sectors:
- The position of sector 0 on each track is offset from the previous track when the low-level format is laid down (**cylinder skew**), so that after the head moves to the next track, sector 0 arrives under it just in time instead of a full rotation later
![](Pasted%20image%2020250505202532.png)
![](Pasted%20image%2020250505203114.png)

View File

@ -0,0 +1,158 @@
---
type: theoretical
backlinks:
- "[[Inter-Process Communication]]"
- "[[Overview#Multiprogramming]]"
- "[[Overview#Multitasking/Timesharing]]"
---
![|300](Pasted%20image%2020250502181012.png)
(C.U. - Control Unit, does FDE and communicates with everything like ALU, registers, etc.)
## Single-Programming Operating Systems
Can only run one program at a time lmao. MS-DOS is like that.
## Address Binding
- Depends on availability of memory space
- Addresses of instructions and variables change with memory location (where the program is loaded)
- Source code addresses are usually **symbolic**, while compiled code addresses bind to **relocatable** addresses
- Linker[^1] or loader[^2] will bind relocatable addresses to **absolute** addresses.
This last step is what's called **address binding**
## Binding to memory
Can happen at different "times":
- Compile time (memory location known in advance, so absolute code can be generated)
- Load time (relocatable code, final addresses fixed when the program is loaded)
- Execution time (binding delayed until run time; needs hardware support such as an MMU)
## Multi-Programming Operating systems
Introduce some decisions that the OS needs to make:
- Where to load each program
- Protecting the programs
- Optimization
### Protection
The operating system should be protected against unauthorized accesses by user processes to its memory space.
We could do this via hardware: the CPU checks every memory access generated in user mode to be sure it falls between the base and base + limit registers set for that process.
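As a rough sketch of that check (in reality it is performed by the MMU hardware on every access, not in C):
```c
// Hypothetical base/limit check: every user-mode address must fall
// inside [base, base + limit); otherwise the hardware traps to the OS.
int address_ok(unsigned long addr, unsigned long base, unsigned long limit) {
    return addr >= base && addr < base + limit;
}
```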
## Contiguous memory -> Segmentation :)
The first solution for memory management is segmentation.
- Allocates memory space to each process
- As big as the process
### Fragmentation
- Each process requires a contiguous block
> [!caution] But fragmentation bad!
> It happens when the free space of main memory gets split into many small pieces **between** processes. It's bad because the total free memory may be large enough for a new process, yet no single contiguous hole is.
![|600](Pasted%20image%2020250502182934.png)
![|600](Pasted%20image%2020250502183002.png)
### Memory Allocation Algorithms
Define which free segment should be allocated to a new process.
#### Best Fit
Allocate the smallest free hole that is big enough (leaves the smallest leftover hole).
![|600](Pasted%20image%2020250502183152.png)
#### Worst fit
Allocate the largest free hole (leaves the largest leftover hole, which may still be usable).
![|600](Pasted%20image%2020250502183221.png)
#### First fit
Allocate the first free hole that is big enough (fast, since the search can stop early).
![|600](Pasted%20image%2020250502183242.png)
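A minimal first-fit sketch over a free list (the `hole` node type is made up; best fit and worst fit differ only in which hole they pick):
```c
#include <stddef.h>

struct hole { size_t start, size; struct hole *next; };

// Return the first hole big enough for `request`, or NULL if none fits.
struct hole *first_fit(struct hole *free_list, size_t request) {
    for (struct hole *h = free_list; h != NULL; h = h->next)
        if (h->size >= request)
            return h;   // best fit would instead keep the smallest such hole
    return NULL;
}
```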
#### Compaction
Shuffle the allocated blocks together so the free space becomes one contiguous hole.
![|600](Pasted%20image%2020250502183310.png)
>[!warning]- Issues with this
>1. If main memory is large, it can take a long time
>2. Complex
>3. Moving a block while it is being used will cause data loss
## Direct Memory Access (DMA)
In order to let CPU execute process instructions while data is being transferred from disk to memory, direct memory access (DMA) is used.
![|600](Pasted%20image%2020250502183523.png)
## Fix-sized memory blocks
In this scheme, memory is partitioned into blocks of the same size. Every block has the same number of bytes, which makes it easier for the system to keep track of which portions of memory are allocated or available.
>[!IMPORTANT]- What's a block?
> A block refers to one of these fixed-size segments. When a process requests memory, the operating system can allocate one or more blocks to satisfy that request. The fixed size means that each block is predictable in terms of its starting address and size.
We can circumvent external fragmentation by using fixed, equal-sized memory blocks.
This, however, introduces another problem: if multiple blocks are allocated to a process, addressing becomes a challenge. Which block does a given address fall into???
>[!example]-
>If an element of an array is at address 2000, the next element may be at another block at a very different address
### Attributes of a block
Each block belongs to one of the memory segments of the program:
- Code
- Data
- Stack
- Heap
For each block, the access permissions can be defined.
>[!question]- What permissions?
> - A block is defined as execute only if it is part of the code segment
> - A block from stack segment has read and write permissions but no execute permission
### Storing block information
For each process, a list of allocated blocks is created. This list shows where each block of the program has been loaded in memory. Also, the permissions are set.
## Paging
Ahaha!
This is a logical follow-up of blocks. The program's memory is split into fixed-size **pages**, and main memory is split into same-sized slots called **page frames**.
- The memory is divided into equal-size blocks named page frames
- A page frame has the same size as a page
### Loading
Each page is loaded onto the first available page frame of the main memory.
A page can belong to one segment of the program only ([Attributes of a block](Memory%20Management.md#Attributes%20of%20a%20block)).
### Logical v. Physical addresses
Each page is **numbered**.
- An instruction or a variable is located by its page number and offset inside the page. This is called a **logical address**.
- The actual address (i.e. `20010`) is referred to as a **physical address**.
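A quick worked example of the split, assuming a 4 KiB page size and treating 20010 as a logical address purely for the arithmetic (the frame number is made up):
```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u          // assumed 4 KiB pages

int main(void) {
    uint32_t logical = 20010;
    uint32_t page    = logical / PAGE_SIZE;   // = 4
    uint32_t offset  = logical % PAGE_SIZE;   // = 3626
    uint32_t frame   = 9;                     // pretend the page table says frame 9
    uint32_t physical = frame * PAGE_SIZE + offset;   // = 40490
    printf("page %u, offset %u -> physical %u\n", page, offset, physical);
    return 0;
}
```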
### Page tables
Yet another box for a box for a box of boxes. A page table is a per-process data structure that maps each page number to the page frame that holds it.
- Each process has its own page table.
- Threads **share** the page table of the process
- [The process control block - PCB](Processes%20and%20Threads.md#The%20process%20control%20block%20-%20PCB) includes a pointer to the page table.
#### Implementation
Page table is kept in main memory.
Every data/instruction access requires two memory accesses - one for page table and one for the data. This can significantly slow down execution time.
So:
>[!question]- How do we solve this?
>We use Translation Look-aside Buffers (TLBs), also called associative memory: *yet another* data structure, this time a small, fast cache of recent page-to-frame translations so most lookups skip the in-memory page table.
#### TLBs
- Typically small
- Parallel and fast
- On a miss, value is loaded into the TLB for faster access next time
>[!IMPORTANT] **Effective access time** -- Super-duper important for exam
> - Hit ratio = % hits
> $$ EAT = \text{hit ratio} \times T_{hit} + (1 - \text{hit ratio}) \times T_{miss} $$
> When solving these exercises, first identify the times for TLB hit (including TLB access and memory access) and for a TLB miss (which involves an extra memory access), then calculate the Effective Access Time (EAT) using the weighted average with the hit ratio. Finally, plug in the hit ratio and respective access times into the EAT formula and compute the result.
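A quick worked example with assumed numbers (TLB access 10 ns, memory access 100 ns, hit ratio 90 %): a hit costs the TLB access plus one memory access, a miss costs the TLB access plus two memory accesses, so
$$
EAT = 0.9 \times (10 + 100) + 0.1 \times (10 + 100 + 100) = 99 + 21 = 120 \text{ ns}
$$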
---
[^1]: That shit makes your program executable. Officially: The linker takes one or more object modules that contain relocatable addresses and combines them into a single executable.
[^2]: The loader takes the executable and loads it into memory when the program is run.

View File

@ -0,0 +1,198 @@
---
type: theoretical
backlinks:
---
## Operating System
- Manages hardware and software
- Resource allocator
- Manages and routes resources and requests
- Control program
- Prevents errors and improper use
## User v. System
### User
```mermaid
graph TD;
A["user"] <--> B["application programs"]
B <--> C["operating system"]
C <--> D["computer hardware"]
```
### System
An operating system is a control program
## Major OS Components
| Name | Description |
| ------------------------ | -------------------------------------------------- |
| Kernel | Core part of the OS; manages CPU, memory, I/O |
| Process manager | Handles multitasking, scheduling, and execution |
| Memory Manager | Allocates/deallocates RAM, manages virtual memory. |
| File System | Organizes, stores, and secures files. |
| Device Manager | Interfaces with hardware peripherals |
| User Interface (CLI/GUI) | Allows users to interact with the OS |
## Management of resources
### CPU
- A program can do nothing unless its instructions are executed by a CPU
- An executing program becomes a **process**
- The process needs resources
- All processes can potentially be executed concurrently by multiplexing on one CPU core or in parallel across multiple
### Memory
- For a program to be executed, it must be loaded into memory.
- To improve the CPU utilization, several programs are kept in memory.
OS should:
- Know which parts of memory are currently being used and which process is using them
- Allocate and deallocate memory accordingly
- Decide which processes and data move in and out of memory
### Cache
- Cache operates at different levels (L1, L2, L3) and is managed primarily by hardware (CPU cache controllers), not the operating system.
- **HOWEVER**, the OS can influence this by
- Managing process scheduling which affects cache locality[^1]
- Optimizing memory access patterns
- Supporting cache-aware algorithms[^2]
### File System
- The operating system abstracts from the physical properties of its storage devices to define a logical storage unit, the file
### Mass Storage
The operating system is responsible for:
- Mounting and unmounting
- Free-space management
- Storage allocation
- Disk scheduling
- Partitioning
- Protection
### I/O
The I/O subsystem consists of several components:
- A memory-management component that includes buffering, caching, and spooling
- A general device-driver interface
- Drivers for specific hardware devices
## User and Operating System Interface
![](Pasted%20image%2020250204103541.png)
## Command Interpreters
Special programs that run when a user first logs on. They read and interpret user commands and execute programs.
On Linux, the command interpreter is called a **shell**. The most common shell is the **Bourne Again Shell (bash)**
## System calls
- System calls are the interface between the user and the operating system
- They are the only way a user program can request a service from the operating system
- They are typically written in a high-level language (C) and are accessed via a library
E.g.
```c
#include <stdio.h>
#include <unistd.h>
int main() {
write(1, "Hello, World!\n", 14); // 1 is the file descriptor for stdout, 14 is the number of bytes to write
return 0;
}
```
![example syscall](assets/image.png)
## APIs in the OS
- APIs are a set of functions that allow the programmer to interact with the OS
- They are typically written in a high-level language (C) and are accessed via a library
- They are used to interact with the OS and its services
e.g. under the hood, widely used C library headers like `stdlib.h` and `stdio.h` are just APIs that end up calling into the OS via system calls
## Performance Optimization
A computer system executes a program by:
- Loading the program into memory
- FDE cycle (Fetch, Decode, Execute)
- Repeat until the program is done
- The OS must manage resources to ensure that the program runs efficiently
### Instruction Types
- CPU-bound: spends most of its time executing instructions
- I/O-bound: spends most of its time waiting for I/O operations to complete
Keyword: **waiting**. I/O operations are very slow compared to the CPU, so I/O-bound processes spend most of their time idle.
### How do we speed it up?
- Replace slow I/O devices
- Perform I/O operations independently of the CPU
- Multiprogramming/multitasking
#### Spooling
- Simultaneous Peripheral Operations On-Line
- A technique that uses a buffer to hold data for devices that are not currently in use
```mermaid
graph TD;
A["User"] --> B["Spooling"]
B --> C["Printer"]
B --> D["Disk"]
```
#### Multiprogramming
- The OS keeps several jobs in memory simultaneously
- Needed for efficiency
- Single users cannot keep the CPU and I/O devices busy at all times
- Scheduling is key
```mermaid
graph TD;
A["User"] --> B["Multiprogramming"]
B --> C["Job 1"]
B --> D["Job 2"]
```
#### Multitasking/Timesharing
- The OS switches between jobs so quickly that it appears as if they are running simultaneously
- If several jobs are ready to run at the same time, the OS must decide which one to run
```mermaid
graph TD;
A["User"] --> B["Multitasking"]
B --> C["Job 1"]
B --> D["Job 2"]
```
---
[^1]: is the tendency of a processor to access the same set of memory locations repetitively over a short period of time
[^2]: https://stackoverflow.com/questions/473137/a-simple-example-of-a-cache-aware-algorithm

View File

@ -0,0 +1,245 @@
---
type: theoretical
backlinks:
- "[[Overview#Multiprogramming]]"
- "[[Overview#Multitasking/Timesharing]]"
---
## Process
A program in execution.
Consists of:
* The program code - **text section**
* Current activity - **PC**, registers
* **Stack** -> Function parameters, return addresses, local variables
* Data section
* **Heap** -> dynamically allocated (at run time) memory
The difference between a process and a program is that the program is the executable file stored on disk, while the process **is running** (shocker).
### Creation
Four events could cause processes to be created
1. System init - Daemons
2. Executing a process by "running a program"
3. A user process request to create a new process
4. Initiation of a batch[^1] job
### `fork()`
A Linux [system call](Overview.md#System%20calls).
```mermaid
graph LR;
A["`fork()`"] --> |parent| B["wait"]
A --> |child|C["`exec()`"]
C --> D["`exit()`"]
D --> B
B --> E["Resumes"]
```
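A minimal sketch of the diagram above using the real `fork()`, `execlp()`, and `waitpid()` calls (the `ls -l` command is just an example):
```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void) {
    pid_t pid = fork();
    if (pid < 0) {                        // fork failed
        perror("fork");
        exit(1);
    } else if (pid == 0) {                // child: replace itself with another program
        execlp("ls", "ls", "-l", (char *)NULL);
        perror("execlp");                 // only reached if exec fails
        exit(1);
    } else {                              // parent: wait for the child to exit
        int status;
        waitpid(pid, &status, 0);
        printf("child %d exited\n", (int)pid);
    }
    return 0;                             // parent resumes here
}
```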
### Hierarchy
Linux creates a parent-child relationship between processes, Windows doesn't.
Linux:
```mermaid
graph TD;
init["init
pid = 1"]
login["login
pid = 8415"]
kthreadd["kthreadd
pid = 2"]
sshd["sshd
pid=3028"]
bash["bash
pid=8416"]
ps["ps
pid=9298"]
emacs["emacs
pid=9204"]
khelper["khelper
pid=6"]
pdflush["pdflush
pid=200"]
init --> login
init --> kthreadd
init --> sshd
login --> bash
bash --> ps
bash --> emacs
kthreadd --> khelper
kthreadd --> pdflush
```
### Termination
1. Normal
Process should return a code to its parent. A terminated child sticks around as a **zombie process** until the parent collects that code with `wait()`. If the parent dies before the child, the child is called an orphan and gets adopted by `init`. Absolutely fucking crazy naming. Every Linux process should have a parent process [source: unicef](https://unicef.org).
2. Error - just a special return code
3. Fatal error, involuntary - division by zero, invalid opcode; process is immediately terminated by the system
4. Killed
### States
As a state machine
1. Running
2. Ready
3. Blocked (blocking == waiting)
```mermaid
graph TD;
A["Running"]
B["Ready"]
C["Blocked"]
A --> |1| C
A --> |2| B
B --> |3| A
C --> |4| B
```
Transitions: (1) the running process blocks waiting for input/a resource, (2) the scheduler picks another process, (3) the scheduler picks this process, (4) the awaited input/resource becomes available.
#### Ready State
- In this state the process is not waiting for a resource
- Can be executed
- Put in a queue (ready queue)
#### I/O queue
- Each I/O device has its own queue
- The OS creates and manages multiple such queues
## Timesharing: In-depth
from [Multitasking/Timesharing](Overview.md#Multitasking/Timesharing)
The output of running programs should not change when we stop and switch back to the same program later on.
### Context switching
Switching implies that we have to store the values of registers, flags, PC, etc. of the current process and load them into the next one. Then we continue.
### The process control block - PCB
The OS needs a place to store the status of each process. This is that data structure.
### Process table
A list of PCBs (one per process)
![Figure: PS](Pasted%20image%2020250419141856.png)
* Timer (`ISR`[^2]) generates multiple interrupts per second
* Store the status of the process in PCB
## Threads
A thread is a basic unit of CPU utilization, consisting of a program counter, a stack, a set of registers, and a thread ID.
A light-weight process.
![](Pasted%20image%2020250419143713.png)
### Processes vs threads
| Processes | Thread |
| ----------------------------------------- | ------------------------------------------------ |
| Heavyweight | Lighter |
| Each process has its own memory | Threads use memory of the process they belong to |
| Inter-Process Communication (IPC) is slow | Way faster inter-thread communication |
| Context switching is more expensive | Less expensive |
| Do not share memory | do share memory |
### Multithreading
- Traditional processes have a single thread of control[^3]
* If a process has multiple threads of control, it can perform more than one task
### Ways to Implement Threads
* Kernel-Level Threads (KLT)
* Managed by the OS kernel
* Each thread is a separate scheduling entity
* `pthread`, `thread`
* User-Level Threads (ULT)
* Managed by user-space libraries, OS is unaware
* Faster context switching
* Green threads
### User Threads and Kernel Threads
- **User threads**
- Implemented by a thread library at the user level
- thread creation and scheduling are done in user space
- **Kernel Threads**
- Managed by OS
### Relationship models
#### Many-to-one
* Many user-level threads map to one kernel thread
* Management done by a thread library in user space
* The entire process blocks whenever a thread makes a blocking syscall
* Only **one** thread can access the kernel at a time (you can't run multiple threads in parallel on multiprocessors)
#### One-to-one
Each user thread is mapped to a kernel thread
- Provides more concurrency
Unfortunately:
- Creating a user thread requires creating the corresponding kernel thread
- Overhead of creating kernel threads restricts the number of threads
#### Many-to-many
Multiplexes many user threads onto a smaller or equal number of kernel threads.
- Allows creation of however many threads the user wants
- The kernel can schedule another thread for execution whenever a thread performs a blocking system call
#### Fork-join
Parent creates forks (children threads) and then waits for the children to terminate, joining with them, at which point it can retrieve and combine results.
This is also called **synchronous threading**. Parent **cannot** continue until the work has been completed.
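A minimal fork-join sketch with POSIX threads (the `work` function and the squared "partial results" are made up; the pointer casts are used only for brevity):
```c
#include <pthread.h>
#include <stdio.h>

#define N 4

static void *work(void *arg) {
    long id = (long)arg;
    return (void *)(id * id);            // pretend this is a partial result
}

int main(void) {
    pthread_t t[N];
    long total = 0;
    for (long i = 0; i < N; i++)
        pthread_create(&t[i], NULL, work, (void *)i);    // fork
    for (long i = 0; i < N; i++) {
        void *res;
        pthread_join(t[i], &res);                        // join (synchronous)
        total += (long)res;
    }
    printf("combined result: %ld\n", total);             // 0 + 1 + 4 + 9 = 14
    return 0;
}
```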
[^1]: A batch job is a scheduled task or a set of commands that are executed without manual intervention - **cron**
[^2]: interrupt service routine - like in LC3
[^3]: sequence of programmed instructions that can be managed independently by a scheduler within a computer program
##### Parallelism
![](Pasted%20image%2020250421222538.png)
## Thread pool
Issues with threads:
- Overhead when creating
- Exhausting system resources
Solution: thread pools - creating a number of threads at startup and place them into a pool where they sit and wait for work.
This optimizes things because threads are created once and shared:
- No per-task creation cost; an idle pooled thread simply picks up the next task
- If a task blocks (e.g., waiting for I/O), the remaining threads keep the pool busy with other tasks
- With **work stealing**, each thread has its own task queue, and whenever a thread finishes its tasks it looks through the other threads' queues and "steals" tasks
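A minimal fixed-size pool sketch with one shared task queue, assuming POSIX threads (no work stealing and no full-queue handling, both omitted for brevity):
```c
#include <pthread.h>

#define WORKERS 4
#define QUEUE   16

typedef void (*task_fn)(int);

static task_fn tasks[QUEUE];
static int     args[QUEUE];
static int head = 0, tail = 0, count = 0;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_empty = PTHREAD_COND_INITIALIZER;

void submit(task_fn fn, int arg) {           // caller side: enqueue a task
    pthread_mutex_lock(&lock);
    tasks[tail] = fn; args[tail] = arg;
    tail = (tail + 1) % QUEUE; count++;
    pthread_cond_signal(&not_empty);
    pthread_mutex_unlock(&lock);
}

static void *worker(void *unused) {          // each pool thread runs this loop
    (void)unused;
    for (;;) {
        pthread_mutex_lock(&lock);
        while (count == 0)
            pthread_cond_wait(&not_empty, &lock);
        task_fn fn = tasks[head]; int a = args[head];
        head = (head + 1) % QUEUE; count--;
        pthread_mutex_unlock(&lock);
        fn(a);                               // run the task outside the lock
    }
    return NULL;
}

int main(void) {
    pthread_t t[WORKERS];
    for (int i = 0; i < WORKERS; i++)        // create the pool once at startup
        pthread_create(&t[i], NULL, worker, NULL);
    /* submit(...) tasks here */
    pthread_exit(NULL);                      // keep the workers alive
}
```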

View File

@ -0,0 +1,125 @@
---
type: theoretical
backlinks:
- "[[Overview#Multitasking/Timesharing]]"
- "[[Processes and Threads#Timesharing In-depth]]"
---
Processes take turns to use the CPU.
## Long Term Scheduling
Decides which processes are admitted into memory; admitted processes waiting for the CPU are put in a *ready* queue.
## Short Term Scheduling
Which process in the ready queue should be assigned the CPU next
## Criteria
We want to:
- Maximize CPU utilization
- Minimize Average Turnaround time
- Maximize throughput
- Minimize waiting and response time
### CPU utilization
Each process spends a fraction $p$ of its time waiting for I/O; with $n$ processes in memory, the probability that all of them are waiting for I/O is $p^n$.
Hence, CPU utilization is:
$$
1- p^n
$$
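For example, with assumed numbers $p = 0.8$ and $n = 4$ processes:
$$
1 - 0.8^{4} = 1 - 0.41 \approx 0.59
$$
so roughly 59 % CPU utilization.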
### Average Turnaround Time
The time since the process enters the ready queue until it terminates.
Average is literally the mean of the aforementioned but for many processes.
### Throughput
Number of processes executed completely in a unit of time.
## Non-Preemptive scheduling
If the process executes to complete, the algorithm is non-preemptive.
- Long average turnaround time
- Whenever a long process starts, short ones have to wait
### FCFS
Read title: first come, first served, i.e. processes run in the order they arrive.
## SJF
Shortest job first. Choose the next process to execute from the processes currently in the ready queue, based on their execution time
> [!IMPORTANT]
> Starvation happens when a process waits for a resource for a long time
This could happen with SJF.
### Attacking starvation by introducing compound priority[^1]
So, SJF uses `1/Execution time` priority. We just add `0.1*waiting time` to it.
## Preemptive scheduling
Scheduler stops a process and reclaims the CPU after assigning it
### SRTF
Shortest remaining time first (as opposed to shortest job first which just takes the initial times, this one is dynamic)
We keep track of when the processes start and how much time they've taken, calculating how much time they have left. We pick the minimum based on that.
### RR
Round robin. Just give everyone a small time window (quantum) to do their job.
We need to find a "time quantum"[^2] by balancing minimizing overhead of context switching and maximizing response time
**no priority**
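A small worked example with assumed numbers: A, B, C all arrive at $t = 0$ with burst times 4, 3, 2 and a quantum of 2. The schedule is A(0-2), B(2-4), C(4-6), A(6-8), B(8-9), so the turnaround times are A = 8, B = 9, C = 6, and the average is $23/3 \approx 7.7$.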
## Process categorization
| Category | Description | Example |
| ----------- | ------------------------------------------------------------ | ----------------------- |
| Batch | No interaction with users, input/output read/written to file | Cron job |
| Interactive | Requires input from user. Needs short response times. | Chat |
| Real-time | Must respond within strict timing constraints (deadlines) | Industrial applications |
## Multi-queue scheduling
Multiple priority levels and uses RR for each.
Chooses processes from the highest level and recurses downwards whenever a level is exhausted.
### + feedback
Processes can be moved from one queue to another based on the type of operations executed (i.e. I/O is high priority).
## Lottery
Each process gets a share of "lottery tickets" (a probability range); draw a random number and run the process whose range contains it. E.g.:
$$
\begin{align*}
p_a \leftarrow 20\% \\
p_b \leftarrow 50\% \\
p_c \leftarrow 30\%
\end{align*}
$$
So:
$$
rand() \rightarrow 0.4 \implies S(\{p_i\}) = p_b
$$
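A minimal sketch of the draw (the `tickets` array is assumed to hold each process's ticket count):
```c
#include <stdlib.h>

// Return the index of the process that owns the winning ticket.
// Assumes the total ticket count is > 0.
int lottery_pick(const int *tickets, int nprocs) {
    int total = 0;
    for (int i = 0; i < nprocs; i++) total += tickets[i];
    int winner = rand() % total;           // the winning ticket number
    for (int i = 0; i < nprocs; i++) {
        winner -= tickets[i];
        if (winner < 0) return i;          // this process owns that ticket
    }
    return nprocs - 1;                     // unreachable if total > 0
}
```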
## Real-time scheduling
Dividing the program into a number of short-lived processes.
---
[^1]: I made that shit the fuck up just now
[^2]: Terrible fucking name, why why why, this should be called a window or some shit.

View File

@ -0,0 +1,104 @@
---
type: theoretical
backlinks:
- "[[Memory Management]]"
---
Separating logical memory from physical memory.
- Allows **very large** virtual memory, when only a smaller physical memory is available
The **virtual address space** of a process refers to the logical view of how a process is stored
## Implementation
Maintain a copy of the process (including its code, data, heap, and stack segments) in a special area of the HDD named **virtual memory**
> [!IMPORTANT]- Refresher - code, data, heap and stack segments
> ![](Pasted%20image%2020250505042419.png)
>[!NOTE]- Another refresher - stack vs. heap allocation
> ![](Pasted%20image%2020250505042548.png)
- Process is divided into pages
- If a page is referred to, it is brought into main memory (**DEMAND PAGING**)
## Page faults
When a page is referenced but not present in main memory, a page fault occurs: the OS must bring the page in from the backing store (disk), which is far slower than a normal memory access.
## Page replacement
The act of evicting a page currently occupying a memory frame to make room for the page we actually need, which is brought in from the backing store.
### Basic Page Replacement
Linear search for free frame, if none -> select victim page via **a page replacement algorithm** and do your thing!
## Typical pages
Contain:
- A P (or V for valid) bit -> page is in RAM (else page fault)
- M (dirty) bit - page in RAM modified
- R bit - referenced since last time R was cleared
## Belady's Anomaly
For some replacement algorithms (notably FIFO), increasing the number of page frames allocated to a process can lead to an *increase* in the number of page faults, rather than a decrease.
## Frame-allocation algorithms
How many frames to give to each process
### Fixed
Each process receives the same number of frames -> **Equal Allocation**
### Proportional
Allocate according to size of process.
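With $s_i$ the size of process $p_i$, $S = \sum_j s_j$ the total size, and $m$ frames available, a proportional allocation gives process $p_i$:
$$
a_i = \frac{s_i}{S} \times m
$$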
## Page-replacement algorithms
Which page to replace, optimizing for lowest page fault rate.
### FIFO
Read title.
### Least Recently Used (LRU)
Replace page that has not been used recently.
**Counter**:
- Every page entry has a counter; every time page is referenced through this entry, copy the clock into the counter.
- When a page is to be changed, find smallest value in counters.
**Stack**:
- Keep a stack of page numbers in a double link form[^1]
- On page reference - move to top
### LRU Approximation
Since exact LRU is slow as shit (it needs bookkeeping on every single memory reference), we need an approximation.
We introduce a **Reference bit**:
- Each page has `R = 0` at first
- Whenever referenced `R = 1`
- Replace any page with `R = 0`
#### Second-chance (Clock)
Do the above, but keep the frames on a circular list with a "clock hand" (like `itertools.cycle()`). On a fault, look at the page under the hand: if `R = 0`, replace it; if `R = 1`, clear the bit (give it a second chance) and advance the hand. See the sketch below.
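A minimal sketch of that loop (the `frame` struct and the caller that maintains `hand` are assumptions):
```c
// Clock / second-chance: frames[] is treated as a circular list,
// each frame has a reference bit R, and *hand is the clock hand.
struct frame { int page; int R; };

int clock_replace(struct frame *frames, int nframes, int *hand) {
    for (;;) {
        struct frame *f = &frames[*hand];
        if (f->R == 0) {                       // not referenced recently: victim
            int victim = *hand;
            *hand = (*hand + 1) % nframes;
            return victim;
        }
        f->R = 0;                              // give it a second chance
        *hand = (*hand + 1) % nframes;
    }
}
```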
## The Working Set Model
We want to have the set of pages "in use" in RAM!
- Working set changes over time
- Usually approximated by the set of pages used by some amount of most recent references
## Thrashing
When a process is busy swapping pages.
Multiprogramming[^2] is high, CPU utilization[^3] is low.
### Solving it
Give the process(es) more memory by:
- Taking it from another process
- Swap out shit until demand is lower (scheduler)
## Inverted page table
Inverted page tables are a memory management method that uses a single table for the entire system to map physical memory addresses to virtual addresses. Instead of each process having its own page table, the inverted page table has entries for each physical page.
## I/O Interlock
Pages must sometimes be locked into memory (i.e. I/O). This is what this is called.
---
[^1]: Doubly-linked list where we can go `prev` and `next`
[^2]: Many processes wanting resources
[^3]: Shit actually happening

View File

@ -0,0 +1,24 @@
---
type: theoretical
---
- Hypervisor-Based Virtualization
- Utilizing hypervisor-based virtualization solutions like VMWare ESXi or Proxmox to create and manage multiple VMs on a single server.
- Cloud computing
## Requirements
1. Safety: hypervisor should have **full control** of virtualized resources
2. Fidelity: behaviour of a program on a virtual machine should be the same as if it were running bare metal
3. Efficiency: Much of code should run without intervention by hypervisor
## Popek and Goldberg
Sensitive instructions are those that behave differently in user vs. kernel mode (I/O, for example). Privileged instructions cause a trap if executed in user mode. A machine can be virtualized with trap-and-emulate iff:
$$
\text{Sensitive INS. } \subseteq \text{ Privileged Ins.}
$$
## Types
![](Pasted%20image%2020250505205120.png)

View File

@ -0,0 +1,24 @@
### Logistics
- Will we have access to an overview of the project roadmap, including deliverables?
> **No. It is a research project, hence no deliverables and such.**
- Do you need us to use specific project management tools (e.g. GitHub, Jira, etc.)?
- How often are you available? Is it possible to have weekly progress reports?
> Every week is good.
### Formal Verification
- What is the expected level of background in Coq or formal methods?
> It's possible to catch up to speed in a couple of months
- Can you explain the process by which a smart contract's safety properties will be validated formally?
- Are there any existing case studies or example proofs we might refer to for a better understanding of the expected output?
> You will be linked to existing projects
- Define the contract verification criteria
### Project scope clarifications
- Among the seven smart contracts being verified, which ones do you expect us to work on, and why those?
- Which technologies are we going to be introduced to, if any? Will we have access to guides for Coq and Solidity?
- How do you envision the high-level Coq framework integrating with existing Solidity projects?
### Optional
- How much of the formal verification process will be based on axioms versus fully constructive proofs?
- Could you elaborate on the notion of “high-level primitives” (e.g., identity, item, property) and how flexible they need to be for different types of contracts?
- Could you specify what you mean by "real numbers and integer approximations"? Is it related to precision with math? How does that impact the security of a smart contract? Practical example?

View File

@ -0,0 +1,33 @@
## Intro
- Formalland 2 years old
- Formal verification for code, proofs
- Find a way to fully specify smart contracts
- Formal verification as means of security (no bugs -> no stealing)
- Alternatives include:
- Testing (Unit, Value)
- Human expertise
- Quite niche in the field of computer science
- We can be sure that the program follows intended behavior after verification
## Smart contracts
- Notion of users
- Admin - owner
- We need to look through some examples [here](https://formal.land/docs/tools/coq-of-solidity/specification-project) to get a feel for them.
- Motivated by bugs [listed here](https://github.com/kadenzipfel/smart-contract-vulnerabilities)
### Example of Smart Contract
- Stablecoins (USDC)
- Possible specification(?) is to state that the amount of coins is fixed
## Work
- Solidity
- VSC online
### Questions during the meeting
- What is the difference between a user and an admin in the context of smart contracts?
- You mentioned that one possible specification is to state that the amount of coins is fixed. How does that relate to formal verification? To our project?
- Is the idea to design a DSL? Could you elaborate on the specifics? Do you have a write-up about it or do we need to come up with it?
> Yes. We want to simplify formal verification of smart contracts by creating a DSL.

View File

@ -0,0 +1,2 @@
End goal: Represent all 7 smart contracts in our DSL

View File

@ -0,0 +1,18 @@
### General Notes
- 7 smart contracts -> gives us a vague idea as to what we're going to do
- No specifics available
- Prepare some questions to have a better shot at understanding what the project's about
- > What are we going to do exactly?
- Swap around roles in order for all of us to learn as much as possible
- Think about the approach
- Start exploring -> have more meetings
### Communication tips
- Need to contact the company, book a meeting and include in CC
- Try to compress as much information as possible in one cycle of email communication, since companies usually take a while to answer.
- Make them define a concrete timeline
- Ask them what their availability is and how many meetings are possible
### What we need to do
- We need to translate the smart contract to their existing system if they have one (they probably do?)
### Next week
- Queries have to be ready