vault backup: 2025-05-02 18:51:24

This commit is contained in:
Boyan 2025-05-02 18:51:24 +02:00
parent 40a3d4e103
commit 73a62f4edd
20 changed files with 359 additions and 68 deletions

16
.obsidian/graph.json vendored
View File

@ -38,14 +38,14 @@
], ],
"collapse-display": false, "collapse-display": false,
"showArrow": false, "showArrow": false,
"textFadeMultiplier": 0.2, "textFadeMultiplier": -0.4,
"nodeSizeMultiplier": 1.22291666666667, "nodeSizeMultiplier": 1.60572916666667,
"lineSizeMultiplier": 1.60572916666667, "lineSizeMultiplier": 2.80520833333333,
"collapse-forces": true, "collapse-forces": false,
"centerStrength": 0.518713248970312, "centerStrength": 0.442708333333333,
"repelStrength": 10, "repelStrength": 17.9166666666667,
"linkStrength": 1, "linkStrength": 1,
"linkDistance": 250, "linkDistance": 250,
"scale": 0.6697358161023433, "scale": 0.44649054406822863,
"close": false "close": true
} }

View File

@ -91,6 +91,5 @@
}, },
"Operating Systems": "🖥", "Operating Systems": "🖥",
"Introduction to Machine Learning": "🤖", "Introduction to Machine Learning": "🤖",
"Introduction to Machine Learning/Introductory lecture.md": "LiInfo", "Introduction to Machine Learning/Introductory lecture.md": "LiInfo"
"Operating Systems/Overview.md": "LiInfo"
} }

View File

@ -4,33 +4,24 @@
"type": "split", "type": "split",
"children": [ "children": [
{ {
"id": "83d271b1a20c4f59", "id": "bc19ba9918441c67",
"type": "tabs", "type": "tabs",
"children": [ "children": [
{ {
"id": "02cadf737df7ea94", "id": "55ce0e76102300bd",
"type": "leaf", "type": "leaf",
"state": { "state": {
"type": "reveal-preview-view", "type": "markdown",
"state": {}, "state": {
"file": "Operating Systems/Memory Management.md",
"mode": "source",
"source": false
},
"icon": "lucide-file", "icon": "lucide-file",
"title": "Slide Preview" "title": "Memory Management"
}
},
{
"id": "edd4d3e6e01a49ac",
"type": "leaf",
"state": {
"type": "release-notes",
"state": {
"currentVersion": "1.8.10"
},
"icon": "lucide-book-up",
"title": "Release Notes 1.8.10"
} }
} }
], ]
"currentTab": 1
} }
], ],
"direction": "vertical" "direction": "vertical"
@ -207,55 +198,56 @@
"omnisearch:Omnisearch": false "omnisearch:Omnisearch": false
} }
}, },
"active": "edd4d3e6e01a49ac", "active": "55ce0e76102300bd",
"lastOpenFiles": [ "lastOpenFiles": [
"Extracurricular/Circuitree/Shitter Zero/Timeline.md", "Operating Systems/assets/Pasted image 20250502183523.png",
"Extracurricular/Circuitree/Shitter Zero/shitter.png",
"Pasted image 20250421222538.png",
"Pasted image 20250423153359.png",
"Pasted image 20250423153254.png",
"Operating Systems/Scheduling.md",
"Operating Systems/Processes and Threads.md",
"Operating Systems/Overview.md", "Operating Systems/Overview.md",
"Extracurricular/Circuitree/Shitter Zero/Agenda.md", "Operating Systems/Processes and Threads.md",
"Extracurricular/Circuitree/Antenna Building/Idea and proposed timeline.md", "Operating Systems/Scheduling.md",
"Extracurricular/Circuitree/Shitter Zero", "Operating Systems/assets/Pasted image 20250502183221.png",
"Extracurricular/Circuitree/Committee Market/discussion/CA.md", "Operating Systems/Memory Management.md",
"Extracurricular/Circuitree/Committee Market/discussion/Committee market ideas.md", "Operating Systems/assets/Pasted image 20250502183310.png",
"Operating Systems/assets/Pasted image 20250502183242.png",
"Operating Systems/assets/Pasted image 20250502183152.png",
"Operating Systems/assets/Pasted image 20250502183002.png",
"Operating Systems/assets/Pasted image 20250502182934.png",
"README.md",
"Pasted image 20250502182936.png",
"Operating Systems/Inter-Process Communication.md",
"Untitled.canvas",
"Discrete Structures/Midterm/attempt 2.md",
"Discrete Structures/Mathematical Data Structures.md",
"Operating Systems/assets/image.png",
"Operating Systems/assets/Pasted image 20250502181012.png",
"Operating Systems/assets/Pasted image 20250502180811.png",
"unicef.org.md",
"Linear Algebra/Matrices.md",
"Languages & Machines/Regular languages.md", "Languages & Machines/Regular languages.md",
"Extracurricular/Circuitree/Shitter Zero/Timeline.md",
"Extracurricular/Circuitree/Shitter Zero",
"Discrete Structures/Mathematical Proofs (Induction).md",
"Discrete Structures/Recurrence relations.md",
"Discrete Structures/Relations and Digraphs.md",
"Discrete Structures/Counting.md",
"Software Engineering/Meeting.md",
"Software Engineering/Reqirements.md",
"Introduction to Machine Learning/Introductory lecture.md",
"Languages & Machines/Introduction.md",
"Functional Programming/Proofs.md",
"Functional Programming/Recursion.md", "Functional Programming/Recursion.md",
"Functional Programming/Polymorphism.md",
"Functional Programming/Lists.md", "Functional Programming/Lists.md",
"Functional Programming/Introduction to Functional Programming.md",
"Functional Programming/Basic Haskell.md",
"Functional Programming/Eq and Num.md", "Functional Programming/Eq and Num.md",
"Fundamentals of Electronics/Introductory Lecture.md", "Functional Programming/Introduction to Functional Programming.md",
"Extracurricular/satQuest/img/Pasted image 20241206134213.png",
"Extracurricular/satQuest/Parts Proposal.md",
"Extracurricular/satQuest/Meeting Dec 18.md",
"Extracurricular/satQuest/Initial Meeting.md",
"Extracurricular/Misc/Ideas.md",
"Extracurricular/Misc/Plan.md",
"Extracurricular/FCG/Logistics.md",
"Extracurricular/FCG/Meeting - BAPC.md",
"Extracurricular/FCG/Meeting 06-09.md",
"Extracurricular/FCG/Radio reception.md",
"Operating Systems/assets/Pasted image 20250419143713.png",
"Operating Systems/assets/Pasted image 20250419141856.png",
"Languages & Machines/assets/Pasted image 20250414190229.png",
"Languages & Machines/assets/Pasted image 20250414190144.png",
"Languages & Machines/assets/Pasted image 20250414190119.png",
"Languages & Machines/assets", "Languages & Machines/assets",
"Languages & Machines", "Languages & Machines",
"Extracurricular/Circuitree/Antenna Building",
"Fundamentals of Electronics",
"Extracurricular/Misc/Proposed Routine Plan.canvas", "Extracurricular/Misc/Proposed Routine Plan.canvas",
"Introduction to Machine Learning", "Extracurricular/Circuitree/Antenna Building/Untitled",
"Extracurricular/Circuitree/Antenna Building",
"Software Engineering",
"Operating Systems/assets", "Operating Systems/assets",
"Operating Systems", "Operating Systems",
"Software Engineering", "Introduction to Machine Learning/assets",
"Discrete Structures/Midterm", "Introduction to Machine Learning",
"Untitled.canvas",
"Web Engineering/canvae/server_client.canvas", "Web Engineering/canvae/server_client.canvas",
"Advanced Programming/projects/second/Refactoring.canvas", "Advanced Programming/projects/second/Refactoring.canvas",
"Advanced Programming/assets/assignment/assignment_organization.canvas", "Advanced Programming/assets/assignment/assignment_organization.canvas",

View File

@ -0,0 +1,139 @@
---
type: theoretical
backlinks:
- "[[Overview#Multiprogramming]]"
---
## Intro
Processes frequently need to communicate with other
processes.
### Independent
Cannot affect or be affected by other processes.
### Dependent
The opposite
### Why?
- Information sharing
- Computation speedup
- Modularity
- Convenience
## Methods
### Shared memory
```mermaid
block-beta
columns 1
a["Process A"] Shared b["Process B"] ... Kernel
```
Processes/threads exchange information by writing in shared memory variables. This is where [Concurrency](Inter-Process%20Communication.md#Concurrency) becomes an issue.
#### Synchronization
To remedy the race condition issue, we should synchronize shared memory access. In other words, **when a process writes to shared memory, others mustn't be able to**. Multiple read access is allowed though.
### Message passing (queueing)
```mermaid
block-beta
columns 1
a["Process A"] b["Process B"] ... ... Kernel
```
Processes communicate with each other by exchanging messages. A process may send information to a port, from which another process may receive information.
We need to at least be able to `send()` and `receive()`.
## Concurrency
Accessing the same shared memory at the same time will cause issues. This occurrence is called a **Race Condition**.
> [!IMPORTANT]- When does it occur?
> A race condition occurs when some processes or threads can access (read or write) a shared data variable concurrently and at least one of the accesses is a write access (data manipulation).
The result depends on when context switching[^1] happens.
The part of the program where shared memory is accessed is called the **Critical Section (CS)**.
### Critical Regions/Sections
![](Pasted%20image%2020250502174340.png)
### Avoiding race conditions
> [!IMPORTANT]- Conditions
>1. No two processes may be simultaneously inside their critical regions. (Mutual Exclusion)
>2. No assumptions may be made about speeds or the number of CPUs.
>3. No process running outside its critical region may block other processes.
>4. No process should have to wait forever to enter its critical region. (Starvation)
### Locks
A flag which tells us whether the shared memory is currently being written into.
### Mutexes
Mutual exclusion - a type of **lock**, specifically to enforce point 1 in [Avoiding race conditions](Inter-Process%20Communication.md#Avoiding%20race%20conditions) (i.e. enforcing mutual exclusion).
### Semaphores
Can allow more than one thread to access a resource. Based on amount of permits. Could be a binary one, which is essentially just a lock, otherwise is called a counting semaphore, only allowing as many writes as implemented.
Use a `wait()`
```c
void wait(int *S){
while((*S)<=0); // busy waiting
(*S)--;
}
```
and a `signal()`:
```c
void signal(int *S){
(*S)++;
}
```
Just keep a fucking list of processes currently semaphoring.
#### Producer-Consumer Problem
- The producer produces items and places them in the buffer.
- The consumer removes items from the buffer for processing
We need to ensure that when a producer is placing an item in the buffer, then at the same time consumer should not consume any item. In this problem, the buffer is the **critical section**.
> [!example]- How do we solve this?
> To solve this problem, we need two counting semaphores Full and Empty. “Full” keeps track of the number of items in the buffer at any given time and “Empty” keeps track of the number of unoccupied slots.
#### Readers-Writers Problem
* Multiple readers can access the shared data simultaneously without causing any issues because they are only reading and not modifying the data.
* Only one writer can access the shared data at a time to ensure data integrity
We already mentioned this in [Synchronization](Inter-Process%20Communication.md#Synchronization)
> [!example]- How do we solve this?
> Two solutions. We either give priority to readers or writers, but we have to do so consistently. This means that we let read/write happen first until exhaustion.
### Peterson's Algorithm
Where `i` and `j` are separate processes.
1. Two Boolean flags: one for each process (e.g.,`flag[0]` and `flag[1]`). Each flag indicates whether the corresponding process wants to enter the critical section.
2.
```c
flag[i] = true;
turn = j;
while (flag[j] && turn == j) {
// busy wait[^2]
}
```
3. Once the while loop condition fails, process i enters its critical section.
4. After finishing its critical section, process i sets flag[i] to false.
5. Repeat!
## Monitors
A high-level abstraction that provides a convenient and effective mechanism for process synchronization.
It defines procedures (i.e. methods).
> [!WARNING]
> Only one process may be executing any of the monitor's procedures within the monitor at a time
It uses **condition variables** (often with wait and signal[^3] operations) to allow threads to wait for certain conditions to be met before proceeding.
![](Pasted%20image%2020250502180811.png)
---
[^1]: [Context switching](Processes%20and%20Threads.md#Context%20switching)
[^2]: Process repeatedly checks a condition (usually in a loop) until a desired event or state is reached. Not too different from polling.
[^3]: to the other threads

View File

@ -0,0 +1,157 @@
---
type: theoretical
backlinks:
- "[[Inter-Process Communication]]"
- "[[Overview#Multiprogramming]]"
- "[[Overview#Multitasking/Timesharing]]"
---
![](Pasted%20image%2020250502181012.png)
(C.U. - Control Unit, does FDE and communicates with everything like ALU, registers, etc.)
## Single-Programing Operating Systems
Can only run one program lmao. MS-DOS is like that.
## Address Binding
- Depends on availability of memory space
- Addresses of instructions and variables change with memory location (where the program is loaded)
- Source code addresses are usually **symbolic**, while compiled code addresses bind to **relocatable** addresses
- Linker[^1] or loader[^2] will bind relocatable addresses to **absolute** addresses.
This last step is what's called **address binding**
## Binding to memory
Can happen at different "times":
- Compile time
- Load time
- Execution time
## Multi-Programming Operating systems
Introduce some decisions that the OS needs to make:
- Where to load each program
- Protecting the programs
- Optimization
### Protection
Operating system should be protected against unauthorized accesses by user process to its memory space.
We could do this via hardware, where the CPU must check every memory access generated in user mode to be sure it is between base and limit for that user.
## Contiguous memory -> Segmentation :)
The first solution for memory management is segmentation.
- Allocates memory space to each process
- As big as the process
### Fragmentation
- Each process requires a contiguous block
> [!caution] But fragmentation bad!
> It happens when the free space of the main memory is divided up into multiple parts **between** processes. It's pretty clear why that's bad.
![](Pasted%20image%2020250502182934.png)
![](Pasted%20image%2020250502183002.png)
### Memory Allocation Algorithms
Define which free segment should be allocated to a new process.
#### Best Fit
![](Pasted%20image%2020250502183152.png)
#### Worst fit
![](Pasted%20image%2020250502183221.png)
#### First fit
![](Pasted%20image%2020250502183242.png)
#### Compaction
![](Pasted%20image%2020250502183310.png)
>[!warning]- Issues with this
>1. If main memory is large, it can take a long time
>2. Complex
>3. Moving a block while it is being used will cause data loss
## Direct Memory Access (DMA)
In order to let CPU execute process instructions while data is being transferred from disk to memory, direct memory access (DMA) is used.
![](Pasted%20image%2020250502183523.png)
## Fix-sized memory blocks
In this scheme, memory is partitioned into blocks of the same size. Every block has the same number of bytes, which makes it easier for the system to keep track of which portions of memory are allocated or available.
>[!IMPORTANT]- What's a block?
> A block refers to one of these fixed-size segments. When a process requests memory, the operating system can allocate one or more blocks to satisfy that request. The fixed size means that each block is predictable in terms of its starting address and size.
We can circumvent the problem using fixed and equal-sized memory blocks.
This, however, introduces another problem - If multiple blocks are allocated to a process, addressing will be a challenge. Which blocks do we address???
>[!example]-
>If an element of an array is at address 2000, the next element may be at another block at a very different address
### Attributes of a block
Each block belongs to one of the memory segments of the program:
- Code
- Data
- Stack
- Heap
For each block, the access permissions can be defined.
>[!question]- What permissions?
> - A block is defined as execute only if it is part of the code segment
> - A block from stack segment has read and write permissions but no execute permission
### Storing block information
For each process, a list of allocated blocks is created. This list shows where each block of the program has been loaded in memory. Also, the permissions are set.
## Paging
Ahaha!
This is a logical follow-up of blocks. What if we have a lot of blocks? Well, we put them in a **page or frame = page frame**.
- The memory is divided into equal-size blocks named page frames
- A page frame has the same size as a page
### Loading
Each page is loaded onto the first available page frame of the main memory.
A page can belong to one segment of the program only ([Attributes of a block](Memory%20Management.md#Attributes%20of%20a%20block)).
### Logical v. Physical addresses
Each page is **numbered**.
- An instruction or a variable is located by its page number and offset inside the page. This is called a **logical address**.
- The actual address (i.e. `20010`) is referred to as a **physical address**.
### Page tables
Yet another box for a box for a box of boxes. This is just a data structure which holds pages.
- Each process has its own page table.
- Threads **share** the page table of the process
- [The process control block - PCB](Processes%20and%20Threads.md#The%20process%20control%20block%20-%20PCB) includes a pointer to the page table.
#### Implementation
Page table is kept in main memory.
Every data/instruction access requires two memory accesses - one for page table and one for the data. This can significantly slow down execution time.
So:
>[!question]- How do we solve this?
>We use Translation Look-Aside Buffers (TLB) - also called memory, which is *another* fucking data structure to hold the boxes of boxes of boxes of boxes.
#### TLBs
- Typically small
- Parallel and fast
- On a miss, value is loaded into the TLB for faster access next time
>[!IMPORTANT] **Effective access time** -- Super-duper important for exam
> - Hit ratio = % hits
> $$ EAT = (\text{hit ratio} \times T_{hit}) + ((1 - \text{hit ratio}) \times T_{miss}) $$
> When solving these exercises, first identify the times for TLB hit (including TLB access and memory access) and for a TLB miss (which involves an extra memory access), then calculate the Effective Access Time (EAT) using the weighted average with the hit ratio. Finally, plug in the hit ratio and respective access times into the EAT formula and compute the result.
---
[^1]: That shit makes your program executable. Officially: The linker takes one or more object modules that contain relocatable addresses and combines them into a single executable.
[^2]: The loader takes the executable and loads it into memory when the program is run.

View File

@ -1,5 +1,6 @@
--- ---
type: theoretical type: theoretical
backlinks:
--- ---
## Operating System ## Operating System
@ -190,6 +191,7 @@ graph TD;
[^1]: is the tendency of a processor to access the same set of memory locations repetitively over a short period of time ---
[^1]: is the tendency of a processor to access the same set of memory locations repetitively over a short period of time
[^2]: https://stackoverflow.com/questions/473137/a-simple-example-of-a-cache-aware-algorithm [^2]: https://stackoverflow.com/questions/473137/a-simple-example-of-a-cache-aware-algorithm

View File

@ -62,7 +62,7 @@ This could happen with SJF.
So, SJF uses `1/Execution time` priority. We just add `0.1*waiting time` to it. So, SJF uses `1/Execution time` priority. We just add `0.1*waiting time` to it.
[^1]: I made that shit the fuck up just now
## Preemptive scheduling ## Preemptive scheduling
Scheduler stops a process and reclaims the CPU after assigning it Scheduler stops a process and reclaims the CPU after assigning it
@ -120,4 +120,6 @@ $$
Dividing the program into a number of short-lived processes. Dividing the program into a number of short-lived processes.
---
[^1]: I made that shit the fuck up just now
[^2]: Terrible fucking name, why why why, this should be called a window or some shit. [^2]: Terrible fucking name, why why why, this should be called a window or some shit.

View File

Before

Width:  |  Height:  |  Size: 23 KiB

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 156 KiB

View File